column                    type     values
code                      string   lengths 86 – 54.5k
code_codestyle            int64    0 – 371
style_context             string   lengths 87 – 49.2k
style_context_codestyle   int64    0 – 349
label                     int64    0 – 1
'''simple docstring'''


def a__ ( ):
    """simple docstring"""
    for n in range(1 , 1_00_00_00 ):
        yield n * (n + 1) // 2


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = 1
    __SCREAMING_SNAKE_CASE = 2
    while i * i <= n:
        __SCREAMING_SNAKE_CASE = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def a__ ( ):
    """simple docstring"""
    return next(i for i in triangle_number_generator() if count_divisors(a__ ) > 5_00 )


if __name__ == "__main__":
    print(solution())
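The snippet above is an obfuscated solution to Project Euler problem 12: find the first triangle number with more than 500 divisors, counting divisors from the prime factorization. A readable sketch of the same algorithm; the names here are illustrative, not part of the dataset row.

# De-obfuscated sketch of the row above; names are illustrative.
def triangle_numbers():
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2  # n-th triangular number

def count_divisors(n: int) -> int:
    # d(n) is the product of (multiplicity + 1) over the prime factors of n.
    count, i = 1, 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        count *= multiplicity + 1
        i += 1
    if n > 1:  # a prime factor larger than sqrt(n) remains
        count *= 2
    return count

print(next(t for t in triangle_numbers() if count_divisors(t) > 500))  # 76576500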
331
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase__ = 10000 lowerCAmelCase__ = None lowerCAmelCase__ = None class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase__ = ParquetConfig def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) __SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ): __SCREAMING_SNAKE_CASE = data_files if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] __SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) ) return splits def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE = 
pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' ) raise
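The cell above is a flattened copy of the `datasets` Parquet builder. The pyarrow pattern at its core, as a minimal sketch: stream a Parquet file in record batches instead of loading it whole. The file name is hypothetical.

import pyarrow as pa
import pyarrow.parquet as pq

parquet_file = pq.ParquetFile("data.parquet")  # hypothetical path
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=10_000)):
    table = pa.Table.from_batches([record_batch])  # one small Table per batch
    print(batch_idx, table.num_rows)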
331
1
'''simple docstring'''


def a__ ( a__ , a__ , a__ , a__ , a__ , a__ ):
    """simple docstring"""
    if index == r:
        for j in range(a__ ):
            print(data[j] , end=""" """ )
        print(""" """ )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    __SCREAMING_SNAKE_CASE = arr[i]
    combination_util(a__ , a__ , a__ , index + 1 , a__ , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(a__ , a__ , a__ , a__ , a__ , i + 1 )


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def a__ ( a__ , a__ , a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(a__ , a__ , a__ , 0 , a__ , 0 )


if __name__ == "__main__":
    # Driver code to check the function above
    UpperCAmelCase : List[Any] = [1_0, 2_0, 3_0, 4_0, 5_0]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
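The snippet above recursively prints every r-element combination of an array: each element is either included (advancing the output index) or skipped. A minimal sketch producing the same output with the standard library; names are illustrative.

from itertools import combinations

def print_combinations(arr, r):
    # Same output as the recursive include/exclude routine above.
    for combo in combinations(arr, r):
        print(*combo)

print_combinations([10, 20, 30, 40, 50], 3)  # 10 20 30, 10 20 40, ...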
331
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase : Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] UpperCAmelCase : Optional[Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] UpperCAmelCase : Optional[int] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[str] = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def a__ ( a__ , a__ ): """simple docstring""" for tf_name, hf_name in patterns: __SCREAMING_SNAKE_CASE = k.replace(a__ , a__ ) return k def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ ) __SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ ) __SCREAMING_SNAKE_CASE = torch_model.state_dict() __SCREAMING_SNAKE_CASE = {} # separating decoder weights __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = DECODER_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = REMAINING_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""] __SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ ) __SCREAMING_SNAKE_CASE = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = ["""global_step"""] for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ): __SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name ) if skip_key: continue __SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ ) __SCREAMING_SNAKE_CASE = array return tf_weights def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ ) __SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase : int = parser.parse_args() UpperCAmelCase : Dict = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
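The cell above is a flattened BigBirdPegasus TF-to-PyTorch conversion script. Its checkpoint-loading step boils down to the following pattern; the checkpoint path and function name are hypothetical.

import tensorflow as tf

def load_tf_weights(ckpt_path: str) -> dict:
    # Enumerate the variables in the checkpoint, skip optimizer bookkeeping,
    # and materialize each remaining tensor as a numpy array.
    weights = {}
    for name, _shape in tf.train.list_variables(ckpt_path):
        if "global_step" in name:
            continue
        weights[name] = tf.train.load_variable(ckpt_path, name)
    return weights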
331
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json', # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "blenderbot-small" lowerCAmelCase__ = ["past_key_values"] lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any=50_265 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : int=2_048 , __SCREAMING_SNAKE_CASE : str=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=8 , __SCREAMING_SNAKE_CASE : Any=2_048 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : List[str]="gelu" , __SCREAMING_SNAKE_CASE : Any=512 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.0 , __SCREAMING_SNAKE_CASE : int=0.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=0 , __SCREAMING_SNAKE_CASE : Any=1 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : str=2 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = encoder_ffn_dim __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = encoder_attention_heads __SCREAMING_SNAKE_CASE = decoder_ffn_dim __SCREAMING_SNAKE_CASE = decoder_layers __SCREAMING_SNAKE_CASE = decoder_attention_heads __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = activation_dropout __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = init_std __SCREAMING_SNAKE_CASE = encoder_layerdrop __SCREAMING_SNAKE_CASE = decoder_layerdrop __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = encoder_layers __SCREAMING_SNAKE_CASE = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , is_encoder_decoder=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , forced_eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) class lowerCAmelCase__ ( a ): """simple docstring""" @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __SCREAMING_SNAKE_CASE = {0: """batch"""} 
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""} __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. __SCREAMING_SNAKE_CASE = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers for i in range(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""} __SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""} else: __SCREAMING_SNAKE_CASE = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def UpperCAmelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE = super().outputs else: __SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self ).outputs if self.use_past: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers for i in range(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""} __SCREAMING_SNAKE_CASE = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Generate decoder inputs __SCREAMING_SNAKE_CASE = seq_length if not self.use_past else 1 __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()} __SCREAMING_SNAKE_CASE = dict(**__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape __SCREAMING_SNAKE_CASE = common_inputs["""decoder_input_ids"""].shape[1] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads __SCREAMING_SNAKE_CASE = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __SCREAMING_SNAKE_CASE = decoder_seq_length + 3 __SCREAMING_SNAKE_CASE = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __SCREAMING_SNAKE_CASE = 
torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] , dim=1 ) __SCREAMING_SNAKE_CASE = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers __SCREAMING_SNAKE_CASE = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) - min_num_layers __SCREAMING_SNAKE_CASE = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(__SCREAMING_SNAKE_CASE ): common_inputs["past_key_values"].append( ( torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE ), ) ) # TODO: test this. __SCREAMING_SNAKE_CASE = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): common_inputs["past_key_values"].append((torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) ) return common_inputs def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __SCREAMING_SNAKE_CASE = seqlen + 2 __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_layers __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.num_attention_heads __SCREAMING_SNAKE_CASE = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __SCREAMING_SNAKE_CASE = common_inputs["""attention_mask"""].dtype __SCREAMING_SNAKE_CASE = torch.cat( [common_inputs["""attention_mask"""], torch.ones(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE )] , dim=1 ) __SCREAMING_SNAKE_CASE = [ (torch.zeros(__SCREAMING_SNAKE_CASE ), torch.zeros(__SCREAMING_SNAKE_CASE )) for _ in range(__SCREAMING_SNAKE_CASE ) ] return common_inputs def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = compute_effective_axis_dimension( __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __SCREAMING_SNAKE_CASE = tokenizer.num_special_tokens_to_add(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = compute_effective_axis_dimension( __SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__SCREAMING_SNAKE_CASE 
) # Generate dummy inputs according to compute batch and sequence __SCREAMING_SNAKE_CASE = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size __SCREAMING_SNAKE_CASE = dict(tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE ) ) return common_inputs def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional[TensorType] = None , ) -> Mapping[str, Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) elif self.task == "causal-lm": __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_causal_lm( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) return common_inputs def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: """simple docstring""" if self.task in ["default", "seq2seq-lm"]: __SCREAMING_SNAKE_CASE = super()._flatten_past_key_values_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
331
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} if prompt is not None: __SCREAMING_SNAKE_CASE = prompt if generate_kwargs is not None: __SCREAMING_SNAKE_CASE = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __SCREAMING_SNAKE_CASE = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __SCREAMING_SNAKE_CASE = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __SCREAMING_SNAKE_CASE = self.model.config.model_type if model_type == "git": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __SCREAMING_SNAKE_CASE = None return model_inputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __SCREAMING_SNAKE_CASE = None if generate_kwargs is None: __SCREAMING_SNAKE_CASE = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name ) __SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs: __SCREAMING_SNAKE_CASE = { """generated_text""": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE ) return records
331
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = ["input_values", "padding_mask"] def __init__( self : Any , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : int = 24_000 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : float = None , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> List[Any]: """simple docstring""" super().__init__(feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = chunk_length_s __SCREAMING_SNAKE_CASE = overlap @property def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : str , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[bool, str, PaddingStrategy]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) if padding and truncation: raise ValueError("""Both padding and truncation were set. 
Make sure you only set one.""" ) elif padding is None: # by default let's pad the inputs __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = bool( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE ).T] # verify inputs are valid for idx, example in enumerate(__SCREAMING_SNAKE_CASE ): if example.ndim > 2: raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels' ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = BatchFeature({"""input_values""": raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: __SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: __SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio ) __SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) ) __SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length __SCREAMING_SNAKE_CASE = """max_length""" else: __SCREAMING_SNAKE_CASE = input_values # normal padding on batch if padded_inputs is None: __SCREAMING_SNAKE_CASE = self.pad( __SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ) if padding: __SCREAMING_SNAKE_CASE = padded_inputs.pop("""attention_mask""" ) __SCREAMING_SNAKE_CASE = [] for example in padded_inputs.pop("""input_values""" ): if self.feature_size == 1: __SCREAMING_SNAKE_CASE = example[..., None] input_values.append(example.T ) __SCREAMING_SNAKE_CASE = input_values if return_tensors is not None: __SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(__SCREAMING_SNAKE_CASE ) return padded_inputs
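The cell above is a flattened EnCodec-style audio feature extractor; its chunking logic derives a chunk length and stride from `chunk_length_s`, `sampling_rate`, and `overlap`, then pads audio to a whole number of strides. A worked sketch of that arithmetic with illustrative values:

import math

sampling_rate, chunk_length_s, overlap = 24_000, 1.0, 0.5
chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 12000 samples

audio_len = 100_000                               # longest example (padding case)
nb_step = math.ceil(audio_len / chunk_stride)     # 9
padded_len = (nb_step - 1) * chunk_stride + chunk_length
print(padded_len)                                 # 120000, covers all 100000 samples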
331
'''simple docstring'''


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = len(a__ )
    while cur > 1:
        # Find the maximum number in arr
        __SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
        # Reverse from 0 to mi
        __SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(a__ )]
        # Reverse whole list
        __SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(a__ )]
        cur -= 1
    return arr


if __name__ == "__main__":
    UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
    UpperCAmelCase : str = [int(item) for item in user_input.split(',')]
    print(pancake_sort(unsorted))
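The snippet above is an obfuscated pancake sort: repeatedly flip the maximum of the unsorted prefix to the front, then flip it into its final position. A readable sketch; names are illustrative.

def pancake_sort(arr):
    for size in range(len(arr), 1, -1):
        mi = arr.index(max(arr[:size]))       # position of the prefix maximum
        arr = arr[mi::-1] + arr[mi + 1:]      # flip the maximum to the front
        arr = arr[size - 1::-1] + arr[size:]  # flip the prefix; max lands at size-1
    return arr

print(pancake_sort([3, 1, 5, 2, 4]))  # [1, 2, 3, 4, 5]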
331
1
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) # TODO: upload to AWS UpperCAmelCase : Optional[int] = { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json' ), } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "retribert" def __init__( self : int , __SCREAMING_SNAKE_CASE : str=30_522 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : List[str]=3_072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1E-12 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Tuple=0 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Any: """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = share_encoders __SCREAMING_SNAKE_CASE = projection_dim
331
'''simple docstring'''
import os

# Precomputes a list of the 100 first triangular numbers
UpperCAmelCase : int = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]


def a__ ( ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) )
    __SCREAMING_SNAKE_CASE = os.path.join(a__ , """words.txt""" )
    __SCREAMING_SNAKE_CASE = """"""
    with open(a__ ) as f:
        __SCREAMING_SNAKE_CASE = f.readline()
    __SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )]
    __SCREAMING_SNAKE_CASE = [
        word
        for word in [sum(ord(a__ ) - 64 for x in word ) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(a__ )


if __name__ == "__main__":
    print(solution())
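The snippet above solves Project Euler problem 42 (coded triangle numbers): count the words whose letter-value sum is a triangular number. A readable sketch, assuming the usual comma-separated, double-quoted words.txt; names are illustrative.

TRIANGULAR = {n * (n + 1) // 2 for n in range(1, 101)}

def count_triangle_words(path: str = "words.txt") -> int:
    with open(path) as f:
        words = [w.strip('"') for w in f.readline().strip().split(",")]
    # Word value = sum of alphabet positions (A=1 ... Z=26).
    return sum(1 for w in words if sum(ord(c) - 64 for c in w) in TRIANGULAR)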
331
1
'''simple docstring''' from manim import * class lowerCAmelCase__ ( a ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 ) __SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] __SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] __SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 ) __SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 ) __SCREAMING_SNAKE_CASE = VGroup(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 ) __SCREAMING_SNAKE_CASE = Text("""CPU""" , font_size=24 ) __SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )] __SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 ) __SCREAMING_SNAKE_CASE = Text("""GPU""" , font_size=24 ) __SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE ) gpu.move_to([-1, -1, 0] ) self.add(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] __SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 ) __SCREAMING_SNAKE_CASE = Text("""Model""" , font_size=24 ) __SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0.5 , aligned_edge=__SCREAMING_SNAKE_CASE ) model.move_to([3, -1.0, 0] ) self.add(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__SCREAMING_SNAKE_CASE ): rect.set_stroke(__SCREAMING_SNAKE_CASE ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) __SCREAMING_SNAKE_CASE = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__SCREAMING_SNAKE_CASE ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__SCREAMING_SNAKE_CASE , buff=0.0 ) self.add(__SCREAMING_SNAKE_CASE ) cpu_targs.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )] __SCREAMING_SNAKE_CASE = VGroup(*__SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , buff=0 ) __SCREAMING_SNAKE_CASE = Text("""Loaded Checkpoint""" , font_size=24 ) __SCREAMING_SNAKE_CASE = Group(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).arrange(__SCREAMING_SNAKE_CASE , aligned_edge=__SCREAMING_SNAKE_CASE , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) __SCREAMING_SNAKE_CASE = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __SCREAMING_SNAKE_CASE = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = MarkupText( f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) 
blue_text.next_to(__SCREAMING_SNAKE_CASE , DOWN * 2.4 , aligned_edge=key_text.get_left() ) __SCREAMING_SNAKE_CASE = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__SCREAMING_SNAKE_CASE ) , Write(__SCREAMING_SNAKE_CASE ) ) self.play(Write(__SCREAMING_SNAKE_CASE , run_time=1 ) , Create(__SCREAMING_SNAKE_CASE , run_time=1 ) ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = [] for i, rect in enumerate(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = fill.copy().set_fill(__SCREAMING_SNAKE_CASE , opacity=0.7 ) target.move_to(__SCREAMING_SNAKE_CASE ) first_animations.append(GrowFromCenter(__SCREAMING_SNAKE_CASE , run_time=1 ) ) __SCREAMING_SNAKE_CASE = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__SCREAMING_SNAKE_CASE , run_time=1.5 ) ) self.play(*__SCREAMING_SNAKE_CASE ) self.play(*__SCREAMING_SNAKE_CASE ) self.wait()
331
'''simple docstring'''


class lowerCAmelCase__ :  # Public class to implement a graph
    """simple docstring"""

    def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = row
        __SCREAMING_SNAKE_CASE = col
        __SCREAMING_SNAKE_CASE = graph

    def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> bool:
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        __SCREAMING_SNAKE_CASE = [-1, 0, 1, -1, 1, -1, 0, 1]
        __SCREAMING_SNAKE_CASE = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Tuple ) -> int:  # And finally, count all islands.
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = [[False for j in range(self.COL )] for i in range(self.ROW )]
        __SCREAMING_SNAKE_CASE = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                    count += 1
        return count
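The snippet above counts connected "islands" of 1-cells in a grid, treating all eight neighbours as adjacent. A readable sketch of the same depth-first search; names are illustrative.

def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        if 0 <= i < rows and 0 <= j < cols and not visited[i][j] and grid[i][j]:
            visited[i][j] = True
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di or dj:
                        dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count

print(count_islands([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))  # 1: diagonals connect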
331
1
'''simple docstring'''


def a__ ( a__ , a__ ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    __SCREAMING_SNAKE_CASE = str(bin(a__ ) )[2:]  # remove the leading "0b"
    __SCREAMING_SNAKE_CASE = str(bin(a__ ) )[2:]  # remove the leading "0b"
    __SCREAMING_SNAKE_CASE = max(len(a__ ) , len(a__ ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(a__ ) , b_binary.zfill(a__ ) )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
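The snippet above XORs two non-negative integers by comparing their zero-padded binary strings. A readable sketch; names are illustrative, and the builtin `a ^ b` gives the same result directly.

def binary_xor(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_bin, b_bin = bin(a)[2:], bin(b)[2:]
    width = max(len(a_bin), len(b_bin))
    return "0b" + "".join(
        str(int(x != y)) for x, y in zip(a_bin.zfill(width), b_bin.zfill(width))
    )

assert binary_xor(25, 32) == bin(25 ^ 32)  # '0b111001'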
331
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_choices def UpperCAmelCase__ ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_attention_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self ) @slow def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_000 __SCREAMING_SNAKE_CASE = (1, 6, vocab_size) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
1
'''simple docstring'''
import os


def a__ ( ):
    """simple docstring"""
    with open(os.path.dirname(a__ ) + """/p022_names.txt""" ) as file:
        __SCREAMING_SNAKE_CASE = str(file.readlines()[0] )
        __SCREAMING_SNAKE_CASE = names.replace("""\"""" , """""" ).split(""",""" )
    names.sort()
    __SCREAMING_SNAKE_CASE = 0
    __SCREAMING_SNAKE_CASE = 0
    for i, name in enumerate(a__ ):
        for letter in name:
            name_score += ord(a__ ) - 64
        total_score += (i + 1) * name_score
        __SCREAMING_SNAKE_CASE = 0
    return total_score


if __name__ == "__main__":
    print(solution())
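The snippet above solves Project Euler problem 22 (names scores): sort the names, then sum alphabetical-rank times letter-value for each. A readable sketch, assuming the standard p022_names.txt format; names are illustrative.

def total_name_score(path: str = "p022_names.txt") -> int:
    with open(path) as f:
        names = sorted(f.readline().replace('"', "").split(","))
    # Score = rank in sorted order * sum of letter values (A=1 ... Z=26).
    return sum(
        (i + 1) * sum(ord(c) - 64 for c in name)
        for i, name in enumerate(names)
    )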
331
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "markuplm" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , __SCREAMING_SNAKE_CASE : Dict=216 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_001 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=50 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple: """simple docstring""" super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout # additional properties __SCREAMING_SNAKE_CASE = max_depth __SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings __SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings __SCREAMING_SNAKE_CASE = tag_pad_id __SCREAMING_SNAKE_CASE = subs_pad_id __SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
331
1
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : int ) -> Dict: """simple docstring""" self.test() def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = False while not completed: if counter == 1: self.reset() __SCREAMING_SNAKE_CASE = self.advance() if not self.does_advance(__SCREAMING_SNAKE_CASE ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.update(__SCREAMING_SNAKE_CASE ) counter += 1 if counter > 10_000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int ) -> List[str]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : List[str]=False ) -> Any: """simple docstring""" raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] ) -> Optional[int]: """simple docstring""" super(__SCREAMING_SNAKE_CASE , self ).__init__() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or len(__SCREAMING_SNAKE_CASE ) == 0: raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' 
) __SCREAMING_SNAKE_CASE = token_ids __SCREAMING_SNAKE_CASE = len(self.token_ids ) __SCREAMING_SNAKE_CASE = -1 # the index of the currently fulfilled step __SCREAMING_SNAKE_CASE = False def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False if self.does_advance(__SCREAMING_SNAKE_CASE ): self.fulfilled_idx += 1 __SCREAMING_SNAKE_CASE = True if self.fulfilled_idx == (self.seqlen - 1): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = completed else: # failed to make progress. __SCREAMING_SNAKE_CASE = True self.reset() return stepped, completed, reset def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = 0 def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple=False ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = PhrasalConstraint(self.token_ids ) if stateful: __SCREAMING_SNAKE_CASE = self.seqlen __SCREAMING_SNAKE_CASE = self.fulfilled_idx __SCREAMING_SNAKE_CASE = self.completed return new_constraint class lowerCAmelCase__ : """simple docstring""" def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[List[int]] , __SCREAMING_SNAKE_CASE : int=True ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = max([len(__SCREAMING_SNAKE_CASE ) for one in nested_token_ids] ) __SCREAMING_SNAKE_CASE = {} for token_ids in nested_token_ids: __SCREAMING_SNAKE_CASE = root for tidx, token_id in enumerate(__SCREAMING_SNAKE_CASE ): if token_id not in level: __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = level[token_id] if no_subsets and self.has_subsets(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f' {nested_token_ids}.' 
) __SCREAMING_SNAKE_CASE = root def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.trie for current_token in current_seq: __SCREAMING_SNAKE_CASE = start[current_token] __SCREAMING_SNAKE_CASE = list(start.keys() ) return next_tokens def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.next_tokens(__SCREAMING_SNAKE_CASE ) return len(__SCREAMING_SNAKE_CASE ) == 0 def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = list(root.values() ) if len(__SCREAMING_SNAKE_CASE ) == 0: return 1 else: return sum([self.count_leaves(__SCREAMING_SNAKE_CASE ) for nn in next_nodes] ) def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.count_leaves(__SCREAMING_SNAKE_CASE ) return len(__SCREAMING_SNAKE_CASE ) != leaf_count class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[List[int]] ) -> int: """simple docstring""" super(__SCREAMING_SNAKE_CASE , self ).__init__() if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or len(__SCREAMING_SNAKE_CASE ) == 0: raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' ) if any(not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for token_ids in nested_token_ids ): raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' 
) __SCREAMING_SNAKE_CASE = DisjunctiveTrie(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = nested_token_ids __SCREAMING_SNAKE_CASE = self.trie.max_height __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = False def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.trie.next_tokens(self.current_seq ) if len(__SCREAMING_SNAKE_CASE ) == 0: return None else: return token_list def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' ) __SCREAMING_SNAKE_CASE = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(__SCREAMING_SNAKE_CASE )}' ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False if self.does_advance(__SCREAMING_SNAKE_CASE ): self.current_seq.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = True else: __SCREAMING_SNAKE_CASE = True self.reset() __SCREAMING_SNAKE_CASE = self.trie.reached_leaf(self.current_seq ) __SCREAMING_SNAKE_CASE = completed return stepped, completed, reset def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = [] def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(self.token_ids ) if stateful: __SCREAMING_SNAKE_CASE = self.seqlen __SCREAMING_SNAKE_CASE = self.current_seq __SCREAMING_SNAKE_CASE = self.completed return new_constraint class lowerCAmelCase__ : """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[Constraint] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = constraints # max # of steps required to fulfill a given constraint __SCREAMING_SNAKE_CASE = max([c.seqlen for c in constraints] ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = False self.init_state() def UpperCAmelCase__ ( self : Dict ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = [constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) for constraint in self.constraints] def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __SCREAMING_SNAKE_CASE = constraint.advance() if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): token_list.append(__SCREAMING_SNAKE_CASE ) elif 
isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): token_list.extend(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = self.inprogress_constraint.advance() if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): token_list.append(__SCREAMING_SNAKE_CASE ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): token_list.extend(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) == 0: return None else: return token_list def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[List[int]] ) -> str: """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.add(__SCREAMING_SNAKE_CASE ) # the entire list of constraints are fulfilled if self.completed: break def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Dict: """simple docstring""" if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False, False if self.completed: __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.inprogress_constraint.update(__SCREAMING_SNAKE_CASE ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __SCREAMING_SNAKE_CASE = None if len(self.pending_constraints ) == 0: # we're done! __SCREAMING_SNAKE_CASE = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pending_constraint.update(__SCREAMING_SNAKE_CASE ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = None if not complete and stepped: __SCREAMING_SNAKE_CASE = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __SCREAMING_SNAKE_CASE = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. 
 __SCREAMING_SNAKE_CASE = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]=True ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = ConstraintListState(self.constraints ) # we actually never touch self.constraints objects # throughout this process. So it's at initialization state. if stateful: __SCREAMING_SNAKE_CASE = [ constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __SCREAMING_SNAKE_CASE = self.inprogress_constraint.copy(stateful=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [constraint.copy() for constraint in self.pending_constraints] return new_state
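# A minimal usage sketch for the machinery above, assuming the obfuscated
# classes map to transformers' `PhrasalConstraint` / `DisjunctiveConstraint` /
# `ConstraintListState`. The import path below mirrors the test file later in
# this dataset and is an assumption here, not part of this file.
from transformers.generation import PhrasalConstraint

constraint = PhrasalConstraint([5, 9, 2])          # force the phrase 5 -> 9 -> 2
assert constraint.advance() == 5                   # next token that makes progress
stepped, completed, reset = constraint.update(5)   # feed the generated token
assert stepped and not completed and constraint.remaining() == 2
constraint.update(9)
_, completed, _ = constraint.update(2)
assert completed and constraint.remaining() == 0   # phrase fully generated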
331
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
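# A short sketch of the lazy-import pattern this init file implements: the
# module body only declares `_import_structure`, and `_LazyModule` resolves an
# attribute the first time it is touched, so importing the package stays cheap
# until a symbol is actually used. Assumes `transformers` (with torch for the
# modeling classes) is installed.
import importlib

reformer = importlib.import_module("transformers.models.reformer")
config_cls = reformer.ReformerConfig      # first access triggers the real import
print(config_cls.model_type)              # -> "reformer"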
331
1
'''simple docstring''' import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class lowerCAmelCase__ ( unittest.TestCase , a ): """simple docstring""" def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = load_tool("""text-to-speech""" ) self.tool.setup() def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = self.tool("""hey""" ) __SCREAMING_SNAKE_CASE = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = self.tool("""hey""" ) __SCREAMING_SNAKE_CASE = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
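# A hedged sketch of using the tool outside the test harness. `load_tool` and
# `.to_raw()` both appear in the test above; writing the waveform to disk is my
# addition, and the 16 kHz sampling rate is a guess for illustration, not
# something the test pins down.
import numpy as np
import scipy.io.wavfile
from transformers import load_tool

tts = load_tool("text-to-speech")
tts.setup()
waveform = tts("hello world").to_raw()    # float waveform tensor
scipy.io.wavfile.write("hello.wav", rate=16_000, data=waveform.numpy().astype(np.float32))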
331
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
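# One behaviour worth spelling out beyond the assertions above: the underlying
# trie keeps several phrases alive at once and only marks completion on a leaf.
# A compact sketch using the same import as these tests:
from transformers.generation import DisjunctiveConstraint

dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4, 5]])
dc.update(1)
dc.update(2)
assert sorted(dc.trie.next_tokens(dc.current_seq)) == [3, 4]  # both branches open
dc.update(4)                                  # commit to the longer phrase
assert not dc.completed and dc.remaining() == 1
dc.update(5)
assert dc.completed                           # reached that phrase's leaf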
331
1
'''simple docstring''' import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = VideoToVideoSDPipeline lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"} lowerCAmelCase__ = False # No `output_type`. lowerCAmelCase__ = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def UpperCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, } return components def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=0 ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """video""": video, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, 
"""output_type""": """pt""", } return inputs def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = sd_pipe.to(__SCREAMING_SNAKE_CASE ) sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """np""" __SCREAMING_SNAKE_CASE = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames __SCREAMING_SNAKE_CASE = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=5E-3 ) @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" pass @unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" ) def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" return super().test_progress_bar() @slow @skip_mps class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames __SCREAMING_SNAKE_CASE = torch.Generator(device="""cpu""" ).manual_seed(0 ) __SCREAMING_SNAKE_CASE = torch.randn((1, 10, 3, 1_024, 576) , generator=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = video.to("""cuda""" ) __SCREAMING_SNAKE_CASE = """Spiderman is surfing""" __SCREAMING_SNAKE_CASE = pipe(__SCREAMING_SNAKE_CASE , video=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=3 , output_type="""pt""" ).frames __SCREAMING_SNAKE_CASE = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
331
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = relative_attention __SCREAMING_SNAKE_CASE = position_biased_input __SCREAMING_SNAKE_CASE = pos_att_type __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
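# A short hedged sketch of the sequence-classification path exercised by
# `create_and_check_deberta_for_sequence_classification`, but against the
# public checkpoint from the integration test; `num_labels=2` and the label
# value are arbitrary choices for illustration (the head is freshly
# initialised, so the loss is meaningless until fine-tuned).
import torch
from transformers import DebertaForSequenceClassification

model = DebertaForSequenceClassification.from_pretrained("microsoft/deberta-base", num_labels=2)
model.eval()
input_ids = torch.tensor([[0, 31_414, 232, 2]])      # ids in the style of the integration test
with torch.no_grad():
    out = model(input_ids, labels=torch.tensor([1]))
print(out.loss.item(), out.logits.shape)             # scalar cross-entropy, logits of shape (1, 2)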
331
1
'''simple docstring''' import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json', } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "align_text_model" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[str]=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : Tuple=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=12 , __SCREAMING_SNAKE_CASE : List[Any]=3_072 , __SCREAMING_SNAKE_CASE : Optional[int]="gelu" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=512 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1E-12 , __SCREAMING_SNAKE_CASE : Tuple=0 , __SCREAMING_SNAKE_CASE : str="absolute" , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = pad_token_id @classmethod def UpperCAmelCase__ ( cls : int , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": __SCREAMING_SNAKE_CASE = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "align_vision_model" def __init__( self : int , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 600 , __SCREAMING_SNAKE_CASE : float = 2.0 , __SCREAMING_SNAKE_CASE : float = 3.1 , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : List[int] = [3, 3, 5, 3, 5, 5, 3] , __SCREAMING_SNAKE_CASE : List[int] = [32, 16, 24, 40, 80, 112, 192] , __SCREAMING_SNAKE_CASE : List[int] = [16, 24, 40, 80, 112, 192, 320] , __SCREAMING_SNAKE_CASE : List[int] = [] , __SCREAMING_SNAKE_CASE : List[int] = [1, 2, 2, 2, 1, 2, 1] , __SCREAMING_SNAKE_CASE : List[int] = [1, 2, 2, 3, 3, 4, 1] , __SCREAMING_SNAKE_CASE : List[int] = [1, 6, 6, 6, 6, 6, 6] , __SCREAMING_SNAKE_CASE : float = 0.25 , __SCREAMING_SNAKE_CASE : str = "swish" , __SCREAMING_SNAKE_CASE : int = 2_560 , __SCREAMING_SNAKE_CASE : str = "mean" , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : float = 0.001 , __SCREAMING_SNAKE_CASE : float = 0.99 , __SCREAMING_SNAKE_CASE : float = 0.2 , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Optional[int]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = width_coefficient __SCREAMING_SNAKE_CASE = depth_coefficient __SCREAMING_SNAKE_CASE = depth_divisor __SCREAMING_SNAKE_CASE = kernel_sizes __SCREAMING_SNAKE_CASE = in_channels __SCREAMING_SNAKE_CASE = out_channels __SCREAMING_SNAKE_CASE = depthwise_padding __SCREAMING_SNAKE_CASE = strides __SCREAMING_SNAKE_CASE = num_block_repeats __SCREAMING_SNAKE_CASE = expand_ratios __SCREAMING_SNAKE_CASE = squeeze_expansion_ratio __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dim __SCREAMING_SNAKE_CASE = pooling_type __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = batch_norm_eps __SCREAMING_SNAKE_CASE = batch_norm_momentum __SCREAMING_SNAKE_CASE = drop_connect_rate __SCREAMING_SNAKE_CASE = sum(__SCREAMING_SNAKE_CASE ) * 4 @classmethod def UpperCAmelCase__ ( cls : Tuple , __SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **__SCREAMING_SNAKE_CASE : Tuple ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = cls.get_config_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from AlignConfig if config_dict.get("""model_type""" ) == "align": __SCREAMING_SNAKE_CASE = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "align" lowerCAmelCase__ = True def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Any=640 , __SCREAMING_SNAKE_CASE : Tuple=1.0 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , **__SCREAMING_SNAKE_CASE : Dict , ) -> Union[str, Any]: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) if text_config is None: __SCREAMING_SNAKE_CASE = {} logger.info("""text_config is None. Initializing the AlignTextConfig with default values.""" ) if vision_config is None: __SCREAMING_SNAKE_CASE = {} logger.info("""vision_config is None. Initializing the AlignVisionConfig with default values.""" ) __SCREAMING_SNAKE_CASE = AlignTextConfig(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AlignVisionConfig(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = projection_dim __SCREAMING_SNAKE_CASE = temperature_init_value __SCREAMING_SNAKE_CASE = initializer_range @classmethod def UpperCAmelCase__ ( cls : str , __SCREAMING_SNAKE_CASE : AlignTextConfig , __SCREAMING_SNAKE_CASE : AlignVisionConfig , **__SCREAMING_SNAKE_CASE : Dict ) -> List[Any]: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.text_config.to_dict() __SCREAMING_SNAKE_CASE = self.vision_config.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output
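# A hedged sketch of composing the three configs above via their public
# upstream names (`AlignTextConfig` / `AlignVisionConfig` / `AlignConfig`);
# `from_text_vision_configs` is the classmethod defined at the end of this file.
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_cfg = AlignTextConfig(hidden_size=768, num_hidden_layers=12)
vision_cfg = AlignVisionConfig(image_size=600)
cfg = AlignConfig.from_text_vision_configs(text_cfg, vision_cfg, projection_dim=640)
assert cfg.to_dict()["text_config"]["hidden_size"] == 768
assert cfg.to_dict()["vision_config"]["image_size"] == 600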
331
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ ) __SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. __SCREAMING_SNAKE_CASE = sum(single_char_strings.values() ) # one length string __SCREAMING_SNAKE_CASE = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __SCREAMING_SNAKE_CASE = single_char_strings[ch] __SCREAMING_SNAKE_CASE = my_str / all_sum my_fir_sum += prob * math.loga(a__ ) # entropy formula. # print entropy print(F'{round(-1 * my_fir_sum ):.1f}' ) # two len string __SCREAMING_SNAKE_CASE = sum(two_char_strings.values() ) __SCREAMING_SNAKE_CASE = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: __SCREAMING_SNAKE_CASE = cha + cha if sequence in two_char_strings: __SCREAMING_SNAKE_CASE = two_char_strings[sequence] __SCREAMING_SNAKE_CASE = int(a__ ) / all_sum my_sec_sum += prob * math.loga(a__ ) # print second entropy print(F'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = Counter() # type: ignore __SCREAMING_SNAKE_CASE = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(a__ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def a__ ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
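# A worked micro-example of the first-order quantity computed above,
# H1 = -sum_c p(c) * log2 p(c). For the string "aab": p(a) = 2/3, p(b) = 1/3,
# so H1 = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ~= 0.918 bits per character
# (the second-order entropy is the same formula over two-character windows).
import math
from collections import Counter

text = "aab"
counts = Counter(text)
total = sum(counts.values())
h1 = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(f"{h1:.3f}")  # 0.918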
331
1
'''simple docstring''' from itertools import product def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = sides_number __SCREAMING_SNAKE_CASE = max_face_number * dice_number __SCREAMING_SNAKE_CASE = [0] * (max_total + 1) __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = range(a__ , max_face_number + 1 ) for dice_numbers in product(a__ , repeat=a__ ): __SCREAMING_SNAKE_CASE = sum(a__ ) totals_frequencies[total] += 1 return totals_frequencies def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = total_frequency_distribution( sides_number=4 , dice_number=9 ) __SCREAMING_SNAKE_CASE = total_frequency_distribution( sides_number=6 , dice_number=6 ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 9 __SCREAMING_SNAKE_CASE = 4 * 9 __SCREAMING_SNAKE_CASE = 6 for peter_total in range(a__ , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) __SCREAMING_SNAKE_CASE = (4**9) * (6**6) __SCREAMING_SNAKE_CASE = peter_wins_count / total_games_number __SCREAMING_SNAKE_CASE = round(a__ , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f"""{solution() = }""")
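# A hand-checkable cross-check of the approach above on a toy matchup. Two
# 2-sided dice produce totals 2, 3, 3, 4, i.e. frequencies [0, 0, 1, 2, 1],
# and the win count mirrors the `peter_wins_count` accumulation: with one
# 2-sided die each, a strict win needs the pair (2, 1), so the probability is 1/4.
from itertools import product

def dist(sides, dice):
    freq = [0] * (sides * dice + 1)
    for roll in product(range(1, sides + 1), repeat=dice):
        freq[sum(roll)] += 1
    return freq

assert dist(2, 2) == [0, 0, 1, 2, 1]
wins = sum(dist(2, 1)[p] * sum(dist(2, 1)[1:p]) for p in range(1, 3))
assert wins / (2 * 2) == 0.25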
331
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def a__ ( a__ ): """simple docstring""" return x + 2 class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) __SCREAMING_SNAKE_CASE = """x = y""" __SCREAMING_SNAKE_CASE = {"""y""": 5} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} ) def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = add_two(x)""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3\ny = 5""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. 
assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} ) __SCREAMING_SNAKE_CASE = {"""x""": 8} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = x""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} ) def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE ) assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
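# A small hedged sketch beyond the cases above: the second argument to
# `evaluate` is the whitelist of callables, so exposing several "tools" is just
# a larger dict (anything outside it is refused, as the CaptureStdout test
# shows). `triple` is a made-up helper for illustration.
from transformers.tools.python_interpreter import evaluate

def triple(x):
    return 3 * x

state = {"x": 2}
result = evaluate("y = triple(x)\nz = y + 1", {"triple": triple}, state=state)
assert result == 7                                  # value of the last assignment
assert state == {"x": 2, "y": 6, "z": 7}            # state is updated in place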
331
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Optional[Any] = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
331
'''simple docstring''' import os def a__ ( a__ = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file: __SCREAMING_SNAKE_CASE = [ [int(a__ ) for element in line.split(""",""" )] for line in input_file.readlines() ] __SCREAMING_SNAKE_CASE = len(a__ ) __SCREAMING_SNAKE_CASE = len(matrix[0] ) __SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )] for i in range(a__ ): __SCREAMING_SNAKE_CASE = matrix[i][0] for j in range(1 , a__ ): for i in range(a__ ): __SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , a__ ): __SCREAMING_SNAKE_CASE = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): __SCREAMING_SNAKE_CASE = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f"""{solution() = }""")
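# A hand-checkable instance of the three-sweep DP above (one rightward step per
# column, then downward and upward relaxations). Movement is right/up/down
# only, entering anywhere in the first column and leaving anywhere in the last,
# so for the grid below the best path runs along the middle row: 1 + 1 + 1 = 3.
def min_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]                          # cost of entering each row
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]   # step right
        for i in range(1, rows):                               # relax downward moves
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):                      # relax upward moves
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

assert min_path_sum([[1, 9, 1], [1, 1, 1], [9, 9, 1]]) == 3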
331
1
'''simple docstring''' import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : str ) -> List[Any]: """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ): __SCREAMING_SNAKE_CASE = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = """sgugger/tiny-distilbert-classification""" __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , only_pretrain_model=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , torchscript=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) # set architectures equal to `None` 
__SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" ) def UpperCAmelCase__ ( self : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , fpaa=__SCREAMING_SNAKE_CASE , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase__ ( self : Tuple ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tinier_bart""" __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , 
multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tinier_bart""" __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE , configs=[config] ) __SCREAMING_SNAKE_CASE = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def UpperCAmelCase__ ( self : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , save_to_csv=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """train_time.csv""" ) , env_info_csv_file=os.path.join(__SCREAMING_SNAKE_CASE , """env.csv""" ) , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) benchmark.run() self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """inf_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """train_time.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """inf_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """train_mem.csv""" ) ).exists() ) self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """env.csv""" ) ).exists() ) def UpperCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(__SCREAMING_SNAKE_CASE : Optional[Any] ): self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """sequential""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """cumulative""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """current""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """total""" ) ) with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE = PyTorchBenchmarkArguments( models=[MODEL_ID] , training=__SCREAMING_SNAKE_CASE , inference=__SCREAMING_SNAKE_CASE , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__SCREAMING_SNAKE_CASE , """log.txt""" ) , log_print=__SCREAMING_SNAKE_CASE , trace_memory_line_by_line=__SCREAMING_SNAKE_CASE , multi_process=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = PyTorchBenchmark(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(__SCREAMING_SNAKE_CASE , """log.txt""" ) 
).exists() )
331
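For context, a minimal sketch of how the benchmark utilities exercised by these tests are typically driven outside a test-suite. The model id and sizes are illustrative, and note that the `PyTorchBenchmark` classes were deprecated in later transformers releases:

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # any hub model id works here
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
# results.time_inference_result / results.memory_inference_result hold, per model,
# {"bs": [...], "ss": [...], "result": {batch_size: {seq_len: value}}}
print(results.time_inference_result)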
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Any = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : Optional[Any] = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Dict = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys()) UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCAmelCase__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir ) __SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute' setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) ) if tokenizer is None: __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else 
self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , ) else: __SCREAMING_SNAKE_CASE = tokenizer __SCREAMING_SNAKE_CASE = MODEL_MODES[mode] if model is None: __SCREAMING_SNAKE_CASE = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , ) else: __SCREAMING_SNAKE_CASE = model def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler] __SCREAMING_SNAKE_CASE = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def UpperCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model __SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""] __SCREAMING_SNAKE_CASE = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: __SCREAMING_SNAKE_CASE = Adafactor( __SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = AdamW( __SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __SCREAMING_SNAKE_CASE = optimizer __SCREAMING_SNAKE_CASE = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: """simple docstring""" return self.validation_end(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: """simple docstring""" if stage == "test": __SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset ) else: __SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int: """simple docstring""" raise NotImplementedError("""You must implement this for your task""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" return self.train_loader def UpperCAmelCase__ ( self : str ) -> 
Optional[Any]: """simple docstring""" return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" return os.path.join( self.hparams.data_dir , """cached_{}_{}_{}""".format( __SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" ) __SCREAMING_SNAKE_CASE = self.step_count self.model.save_pretrained(__SCREAMING_SNAKE_CASE ) self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) @staticmethod def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int: """simple docstring""" parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument( """--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , ) parser.add_argument( """--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). 
Goes into model.config""" , ) parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" ) parser.add_argument( """--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" ) parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE ) parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE ) parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE ) parser.add_argument("""--adafactor""" , action="""store_true""" ) class lowerCAmelCase__ ( pl.Callback ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCAmelCase__ ( pl.Callback ): """simple docstring""" def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__SCREAMING_SNAKE_CASE ) class lowerCAmelCase__ ( pl.Callback ): """simple docstring""" def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""] __SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]: """simple docstring""" rank_zero_info("""***** Validation results *****""" ) __SCREAMING_SNAKE_CASE = trainer.callback_metrics # Log results for key in sorted(__SCREAMING_SNAKE_CASE ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str: """simple docstring""" rank_zero_info("""***** Test results *****""" ) __SCREAMING_SNAKE_CASE = trainer.callback_metrics # Log and save results to file __SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer: for key in sorted(__SCREAMING_SNAKE_CASE ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) ) writer.write("""{} = 
{}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) ) def a__ ( a__ , a__ ): """simple docstring""" parser.add_argument( """--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=a__ , default="""O2""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ ) parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" ) parser.add_argument( """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" ) parser.add_argument( """--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , ) def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=a__ ) # add custom checkpoints if checkpoint_callback is None: __SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(a__ ) if logging_callback is None: __SCREAMING_SNAKE_CASE = LoggingCallback() __SCREAMING_SNAKE_CASE = {} if args.fpaa: __SCREAMING_SNAKE_CASE = 16 if args.gpus > 1: __SCREAMING_SNAKE_CASE = """auto""" __SCREAMING_SNAKE_CASE = """ddp""" __SCREAMING_SNAKE_CASE = args.accumulate_grad_batches __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = """auto""" __SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args( a__ , weights_summary=a__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a__ , val_check_interval=1 , num_sanity_val_steps=2 , **a__ , ) if args.do_train: trainer.fit(a__ ) else: print("""RAG modeling tests with new set functions successfuly executed!""" ) return trainer
331
1
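The optimizer setup in the module above uses a pattern worth isolating: weight decay is applied to every parameter except biases and LayerNorm weights. A standalone sketch in plain PyTorch (the tiny model and hyperparameters are placeholders; attribute names are chosen so the substring match fires):

import torch
from torch import nn

class TinyModel(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = TinyModel()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)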
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED UpperCAmelCase : Optional[Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } UpperCAmelCase : Dict = { 'allenai/led-base-16384': 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __SCREAMING_SNAKE_CASE = bs[:] __SCREAMING_SNAKE_CASE = 0 for b in range(2**8 ): if b not in bs: bs.append(a__ ) cs.append(2**8 + n ) n += 1 __SCREAMING_SNAKE_CASE = [chr(a__ ) for n in cs] return dict(zip(a__ , a__ ) ) def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE = char return pairs class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ["input_ids", "attention_mask"] def __init__( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict="replace" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : List[Any]="<unk>" , __SCREAMING_SNAKE_CASE : List[Any]="<pad>" , __SCREAMING_SNAKE_CASE : Optional[int]="<mask>" , __SCREAMING_SNAKE_CASE : int=False , **__SCREAMING_SNAKE_CASE : Any , ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else bos_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else eos_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else sep_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else cls_token __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else unk_token 
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( errors=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle: __SCREAMING_SNAKE_CASE = json.load(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = {v: k for k, v in self.encoder.items()} __SCREAMING_SNAKE_CASE = errors # how to handle errors in decoding __SCREAMING_SNAKE_CASE = bytes_to_unicode() __SCREAMING_SNAKE_CASE = {v: k for k, v in self.byte_encoder.items()} with open(__SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as merges_handle: __SCREAMING_SNAKE_CASE = merges_handle.read().split("""\n""" )[1:-1] __SCREAMING_SNAKE_CASE = [tuple(merge.split() ) for merge in bpe_merges] __SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __SCREAMING_SNAKE_CASE = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def UpperCAmelCase__ ( self : List[str] ) -> str: """simple docstring""" return len(self.encoder ) def UpperCAmelCase__ ( self : int ) -> int: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE = tuple(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = get_pairs(__SCREAMING_SNAKE_CASE ) if not pairs: return token while True: __SCREAMING_SNAKE_CASE = min(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : self.bpe_ranks.get(__SCREAMING_SNAKE_CASE , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = bigram __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 while i < len(__SCREAMING_SNAKE_CASE ): try: __SCREAMING_SNAKE_CASE = word.index(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE = j if word[i] == first and i < len(__SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE = tuple(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = new_word if len(__SCREAMING_SNAKE_CASE ) == 1: break else: __SCREAMING_SNAKE_CASE = get_pairs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """ """.join(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = word return word def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : 
Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for token in re.findall(self.pat , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__SCREAMING_SNAKE_CASE ).split(""" """ ) ) return bpe_tokens def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> int: """simple docstring""" return self.encoder.get(__SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> int: """simple docstring""" return self.decoder.get(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """""".join(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE , ensure_ascii=__SCREAMING_SNAKE_CASE ) + """\n""" ) __SCREAMING_SNAKE_CASE = 0 with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __SCREAMING_SNAKE_CASE : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' 
""" Please check that the tokenizer is not corrupted!""" ) __SCREAMING_SNAKE_CASE = token_index writer.write(""" """.join(__SCREAMING_SNAKE_CASE ) + """\n""" ) index += 1 return vocab_file, merge_file def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] __SCREAMING_SNAKE_CASE = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None , __SCREAMING_SNAKE_CASE : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE ) if token_ids_a is None: return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict=False , **__SCREAMING_SNAKE_CASE : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()): __SCREAMING_SNAKE_CASE = """ """ + text return (text, kwargs) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[Dict[str, EncodedInput], BatchEncoding] , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , ) -> dict: """simple docstring""" __SCREAMING_SNAKE_CASE = super()._pad( encoded_inputs=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding_strategy=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , ) # Load from model defaults if return_attention_mask is None: __SCREAMING_SNAKE_CASE = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __SCREAMING_SNAKE_CASE = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
__SCREAMING_SNAKE_CASE = len(encoded_inputs["""global_attention_mask"""] ) != len(__SCREAMING_SNAKE_CASE ) if needs_to_be_padded: __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __SCREAMING_SNAKE_CASE = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": __SCREAMING_SNAKE_CASE = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
331
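The `_pad` override at the end of the tokenizer above adds exactly one thing on top of the base padding logic: the LED-specific `global_attention_mask` must be padded to the same length as the other inputs, with `-1` rather than `0`. A pure-Python sketch of just that step (function name is hypothetical):

def pad_global_attention_mask(mask, target_length, padding_side="right"):
    # -1 is used because 0 already means "local attention" in LED,
    # not "do not attend".
    difference = target_length - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

print(pad_global_attention_mask([1, 0, 0], 5))  # [1, 0, 0, -1, -1]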
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = (DDPMScheduler,) def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = { """num_train_timesteps""": 1_000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**__SCREAMING_SNAKE_CASE ) return config def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. 
predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" ) __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ): if i == len(__SCREAMING_SNAKE_CASE ) - 1: __SCREAMING_SNAKE_CASE = -1 else: __SCREAMING_SNAKE_CASE = timesteps[i + 1] __SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of 
`num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
331
1
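The `_get_variance` values asserted in the test above come from the DDPM posterior variance beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t over the test's linear beta schedule. A numpy sketch of the quantity being checked; the printed values should match the test's expectations to within the asserted tolerances:

import numpy as np

betas = np.linspace(0.0001, 0.02, 1000)  # linear schedule from the scheduler config
alphas_cumprod = np.cumprod(1.0 - betas)

def posterior_variance(t: int) -> float:
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    return (1 - alpha_prod_t_prev) / (1 - alphas_cumprod[t]) * betas[t]

print(posterior_variance(0))    # 0.0
print(posterior_variance(487))  # ~0.00979
print(posterior_variance(999))  # ~0.02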
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = ["audio_values", "audio_mask"] def __init__( self : str , __SCREAMING_SNAKE_CASE : Tuple=2_048 , __SCREAMING_SNAKE_CASE : Optional[Any]=1 , __SCREAMING_SNAKE_CASE : List[str]=[16, 16] , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Tuple=44_100 , __SCREAMING_SNAKE_CASE : Union[str, Any]=86 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2_048 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Optional[int]: """simple docstring""" super().__init__( feature_size=__SCREAMING_SNAKE_CASE , sampling_rate=__SCREAMING_SNAKE_CASE , padding_value=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = spectrogram_length __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = feature_size // self.patch_size[1] __SCREAMING_SNAKE_CASE = n_fft __SCREAMING_SNAKE_CASE = sampling_rate // hop_length_to_sampling_rate __SCREAMING_SNAKE_CASE = sampling_rate __SCREAMING_SNAKE_CASE = padding_value __SCREAMING_SNAKE_CASE = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__SCREAMING_SNAKE_CASE , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__SCREAMING_SNAKE_CASE , norm="""slaney""" , mel_scale="""slaney""" , ).T def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.array ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE = spectrogram( __SCREAMING_SNAKE_CASE , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) __SCREAMING_SNAKE_CASE = log_spec[:, :-1] __SCREAMING_SNAKE_CASE = log_spec - 20.0 __SCREAMING_SNAKE_CASE = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = True , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , **__SCREAMING_SNAKE_CASE : int , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled' f' with {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. 
""" """Failing to do so can result in silent errors that might be hard to debug.""" ) __SCREAMING_SNAKE_CASE = isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __SCREAMING_SNAKE_CASE = is_batched_numpy or ( isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ): __SCREAMING_SNAKE_CASE = np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) elif isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __SCREAMING_SNAKE_CASE = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __SCREAMING_SNAKE_CASE = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __SCREAMING_SNAKE_CASE = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [np.asarray(__SCREAMING_SNAKE_CASE , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __SCREAMING_SNAKE_CASE = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __SCREAMING_SNAKE_CASE = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __SCREAMING_SNAKE_CASE = np.array(__SCREAMING_SNAKE_CASE ).astype(np.floataa ) # convert into correct format for padding __SCREAMING_SNAKE_CASE = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __SCREAMING_SNAKE_CASE = np.ones([len(__SCREAMING_SNAKE_CASE ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = padded_audio_features * self.padding_value for i in range(len(__SCREAMING_SNAKE_CASE ) ): __SCREAMING_SNAKE_CASE = audio_features[i] __SCREAMING_SNAKE_CASE = feature # return as BatchFeature if return_attention_mask: __SCREAMING_SNAKE_CASE = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: __SCREAMING_SNAKE_CASE = {"""audio_values""": padded_audio_features} __SCREAMING_SNAKE_CASE = BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) return encoded_inputs
331
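The normalisation at the end of `_np_extract_fbank_features` above is easy to miss: the dB-scaled spectrogram is shifted by -20 dB and squashed toward [-1, 1]. In isolation, with a stand-in array:

import numpy as np

log_spec = np.random.uniform(-80.0, 0.0, size=(128, 10))  # fake dB-scale mel spectrogram
log_spec = log_spec - 20.0
log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
# Values now lie in [-1.0, 0.5]; the 0 dB ceiling maps to 0.5.
print(log_spec.min(), log_spec.max())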
'''simple docstring'''
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue on a binary heap, supporting decrease-key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Move the root to the end, pop it, then restore the heap property.
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as a nested adjacency map."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Return (distance map, parent map) of a minimum spanning tree of `graph`."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node

    return dist, parent
331
1
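A usage sketch for the structures above, using the restored names (`GraphUndirectedWeighted`, `prims_algo`): build a small graph and read the MST off the parent map.

graph = GraphUndirectedWeighted[str]()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "d", 5)
graph.add_edge("a", "c", 15)
dist, parent = prims_algo(graph)
print(parent)  # {'a': None, 'b': 'a', 'c': 'b', 'd': 'c'}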
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    'configuration_gpt_bigcode': ['GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTBigCodeConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_gpt_bigcode'] = [
        'GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTBigCodeForSequenceClassification',
        'GPTBigCodeForTokenClassification',
        'GPTBigCodeForCausalLM',
        'GPTBigCodeModel',
        'GPTBigCodePreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
331
'''simple docstring'''
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of ax^2 + bx + c = 0, as floats when they are real."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
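For the hard-coded call in `main`, the arithmetic checks out as follows (a worked example against the function above):

# 5x^2 + 6x + 1 = 0
# delta = 6*6 - 4*5*1 = 16, sqrt(delta) = 4
# root_1 = (-6 + 4) / 10 = -0.2, root_2 = (-6 - 4) / 10 = -1.0
assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)

# A negative discriminant yields complex conjugates instead:
# x^2 + 1 = 0 -> delta = -4, roots are +1j and -1j
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)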
331
1
'''simple docstring''' import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def a__ ( a__ , a__ , a__=None ): """simple docstring""" assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' __SCREAMING_SNAKE_CASE = nn.Parameter(a__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' __SCREAMING_SNAKE_CASE = nn.Parameter(a__ ) def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = np.asarray(weights[0] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[1] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , ) set_param( torch_layer.output.dense , torch.tensor(a__ ).view(-1 , a__ ).contiguous().transpose(0 , 1 ) , ) def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = np.asarray(weights[0] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[1] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[2] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(a__ ).transpose(1 , 2 ).contiguous().view(-1 , a__ ) , ) set_param( torch_layer.output.dense , torch.tensor(a__ ).view(-1 , a__ ).contiguous().transpose(0 , 1 ) , ) def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = weights[0][0][0] __SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[0] ) __SCREAMING_SNAKE_CASE = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(a__ ) , torch.tensor(a__ ) , ) # lsh weights + output __SCREAMING_SNAKE_CASE = weights[0][1] if len(a__ ) < 4: set_layer_weights_in_torch_lsh(a__ , torch_block.attention , a__ ) else: set_layer_weights_in_torch_local(a__ , torch_block.attention , a__ ) # intermediate weighs __SCREAMING_SNAKE_CASE = weights[2][0][1][2] # Chunked Feed Forward if len(a__ ) == 4: __SCREAMING_SNAKE_CASE = intermediate_weights[2] # layernorm 2 __SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][0] ) __SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(a__ ) , torch.tensor(a__ ) , ) # intermediate dense __SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][0] ) __SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(a__ ).transpose(0 , 1 ).contiguous() , torch.tensor(a__ ) , ) # intermediate out __SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][0] ) __SCREAMING_SNAKE_CASE = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(a__ ).transpose(0 , 1 ).contiguous() , torch.tensor(a__ ) , ) def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = torch_model.reformer # word embeds __SCREAMING_SNAKE_CASE = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(a__ ) , ) if 
isinstance(weights[3] , a__ ): __SCREAMING_SNAKE_CASE = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): __SCREAMING_SNAKE_CASE = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' __SCREAMING_SNAKE_CASE = nn.Parameter(torch.tensor(a__ ) ) __SCREAMING_SNAKE_CASE = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( a__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): __SCREAMING_SNAKE_CASE = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(a__ , a__ , a__ ) # output layer norm __SCREAMING_SNAKE_CASE = np.asarray(weights[7][0] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(a__ ) , torch.tensor(a__ ) , ) # output embeddings __SCREAMING_SNAKE_CASE = np.asarray(weights[9][0] ) __SCREAMING_SNAKE_CASE = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(a__ ).transpose(0 , 1 ).contiguous() , torch.tensor(a__ ) , ) def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = ReformerConfig.from_json_file(a__ ) print(F'Building PyTorch model from configuration: {config}' ) __SCREAMING_SNAKE_CASE = ReformerModelWithLMHead(a__ ) with open(a__ , """rb""" ) as f: __SCREAMING_SNAKE_CASE = pickle.load(a__ )["""weights"""] set_model_weights_in_torch(a__ , a__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , a__ ) if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : Dict = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
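Each `set_layer_weights_in_torch_*` helper above reshapes a per-head trax weight tensor into the 2-D matrix a PyTorch linear layer stores, then hands it to `set_param`, which asserts the shapes agree. A toy illustration of that transpose-and-flatten step (the dimensions are invented for the example, and the trax layout is my reading of the code above, not documented fact):

import numpy as np
import torch
from torch import nn

num_heads, head_dim, hidden = 2, 3, 6

# trax stores per-head projections; PyTorch wants one flat weight matrix.
trax_query = np.arange(num_heads * hidden * head_dim, dtype=np.float32).reshape(
    num_heads, hidden, head_dim
)
flat = torch.tensor(trax_query).transpose(1, 2).contiguous().view(-1, hidden)

layer = nn.Linear(hidden, num_heads * head_dim, bias=False)
assert layer.weight.shape == flat.shape  # the guard set_param enforces
layer.weight = nn.Parameter(flat)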
331
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration for the RetriBERT dual-encoder retrieval model."""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
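With the restored names above, the config behaves like any `PretrainedConfig`; a short usage sketch (the relative imports mean this only runs inside the transformers package, and the path below is illustrative):

# Default RetriBERT-base hyperparameters, overriding the projection head size.
config = RetriBertConfig(projection_dim=256)
assert config.num_hidden_layers == 8
assert config.share_encoders is True

# Round-trip through JSON, the normal persistence path for PretrainedConfig.
config.save_pretrained("/tmp/retribert-demo")  # writes config.json
reloaded = RetriBertConfig.from_pretrained("/tmp/retribert-demo")
assert reloaded.projection_dim == 256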
331
1
'''simple docstring''' from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : Tuple = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "efficientformer" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[int] = [3, 2, 6, 4] , __SCREAMING_SNAKE_CASE : List[int] = [48, 96, 224, 448] , __SCREAMING_SNAKE_CASE : List[bool] = [True, True, True, True] , __SCREAMING_SNAKE_CASE : int = 448 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : int = 4 , __SCREAMING_SNAKE_CASE : int = 7 , __SCREAMING_SNAKE_CASE : int = 5 , __SCREAMING_SNAKE_CASE : int = 8 , __SCREAMING_SNAKE_CASE : int = 4 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 16 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 2 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : float = 0.0 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : float = 1E-5 , __SCREAMING_SNAKE_CASE : str = "gelu" , __SCREAMING_SNAKE_CASE : float = 0.02 , __SCREAMING_SNAKE_CASE : float = 1E-12 , __SCREAMING_SNAKE_CASE : int = 224 , __SCREAMING_SNAKE_CASE : float = 1E-05 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = hidden_sizes __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = patch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = depths __SCREAMING_SNAKE_CASE = mlp_expansion_ratio __SCREAMING_SNAKE_CASE = downsamples __SCREAMING_SNAKE_CASE = dim __SCREAMING_SNAKE_CASE = key_dim __SCREAMING_SNAKE_CASE = attention_ratio __SCREAMING_SNAKE_CASE = resolution __SCREAMING_SNAKE_CASE = pool_size __SCREAMING_SNAKE_CASE = downsample_patch_size __SCREAMING_SNAKE_CASE = downsample_stride __SCREAMING_SNAKE_CASE = downsample_pad __SCREAMING_SNAKE_CASE = drop_path_rate __SCREAMING_SNAKE_CASE = num_metaad_blocks __SCREAMING_SNAKE_CASE = distillation __SCREAMING_SNAKE_CASE = use_layer_scale __SCREAMING_SNAKE_CASE = layer_scale_init_value __SCREAMING_SNAKE_CASE = image_size __SCREAMING_SNAKE_CASE = batch_norm_eps
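This mirrors transformers' `EfficientFormerConfig` with identifiers mangled (`num_metaad_blocks` appears to be a corrupted `num_meta3d_blocks`). The four-element `depths`/`hidden_sizes`/`downsamples` lists describe the model's stages; a small sketch of reading them with the defaults above:

# With the defaults above: stages of 3, 2, 6 and 4 blocks, widening 48 -> 448.
depths = [3, 2, 6, 4]
hidden_sizes = [48, 96, 224, 448]
downsamples = [True, True, True, True]

for stage, (blocks, width, down) in enumerate(zip(depths, hidden_sizes, downsamples)):
    print(f"stage {stage}: {blocks} blocks at width {width}, downsample={down}")

assert sum(depths) == 15  # total blocks in the L1 variant these defaults mirror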
331
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = AltDiffusionPipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) __SCREAMING_SNAKE_CASE = 77 __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, 
"""num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A photo of an astronaut""" __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE 
) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
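The recurring trick in these tests is a per-device seeded `torch.Generator`, so a two-step diffusion run is reproducible enough to compare a 3x3 image slice against hard-coded numbers. The seeding helper reduces to roughly this sketch:

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    """MPS generators historically misbehaved, so tests fall back to the
    global CPU seed there and use a device-local generator elsewhere."""
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen_a = make_generator("cpu")
gen_b = make_generator("cpu")
# identical seeds -> identical random streams -> comparable outputs
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))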
331
1
'''simple docstring'''
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its 1-based position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of `encode`: map 1-based alphabet positions back to letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
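Round-tripping a word makes the offset arithmetic concrete (`ord('a')` is 97, hence the `- 96`):

assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"
# The scheme only covers lowercase a-z; those endpoints map to 1 and 26:
assert encode("a") == [1] and encode("z") == [26]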
331
'''simple docstring''' import argparse import os import re import packaging.version UpperCAmelCase : Optional[int] = 'examples/' UpperCAmelCase : List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCAmelCase : Union[str, Any] = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } UpperCAmelCase : Tuple = 'README.md' def a__ ( a__ , a__ , a__ ): """simple docstring""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ ) __SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ ) with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(a__ ) def a__ ( a__ ): """simple docstring""" for folder, directories, fnames in os.walk(a__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" ) def a__ ( a__ , a__=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(a__ , a__ , a__ ) if not patch: update_version_in_examples(a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures""" __SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __SCREAMING_SNAKE_CASE = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(a__ ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0] return packaging.version.parse(a__ ) def a__ ( a__=False ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? 
[{default_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = default_version print(F'Updating version to {version}.' ) global_version_update(a__ , patch=a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() __SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0' __SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = dev_version print(F'Updating version to {version}.' ) global_version_update(a__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCAmelCase : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
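Each `REPLACE_PATTERNS` entry pairs a compiled regex with a template whose literal `VERSION` is substituted before `re.sub` runs. A self-contained check of the `init` rule (the sample text is invented):

import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replace = '__version__ = "VERSION"\n'

code = 'from .utils import logging\n__version__ = "0.18.0.dev0"\n'
new_code = re_pattern.sub(replace.replace("VERSION", "0.18.0"), code)
assert '__version__ = "0.18.0"' in new_code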
331
1
'''simple docstring'''
def perfect(number: int) -> bool:
    """True when `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(f"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
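Worked instances: 28's proper divisors are 1, 2, 4, 7 and 14, which sum back to 28, and no proper divisor can exceed half the number, which is why the scan stops at `number // 2`:

assert perfect(6)       # 1 + 2 + 3
assert perfect(28)      # 1 + 2 + 4 + 7 + 14
assert perfect(496)
assert not perfect(27)  # divisors 1, 3, 9 sum to 13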
331
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return MraConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , 
__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 
).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE ) 
self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""MRA does not output attentions""" ) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
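The tester above feeds every model random integer tensors built by `ids_tensor` and `random_attention_mask`. Those helpers reduce to roughly this (a sketch; the transformers originals differ in minor details):

import torch

def ids_tensor(shape, vocab_size):
    """Random token ids, the stand-in inputs the tester feeds every model."""
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

def random_attention_mask(shape):
    """Random 0/1 mask with at least one attended token per row."""
    mask = torch.randint(0, 2, tuple(shape), dtype=torch.long)
    mask[:, 0] = 1  # guarantee at least one visible token per row
    return mask

input_ids = ids_tensor([2, 8], vocab_size=99)
mask = random_attention_mask([2, 8])
assert input_ids.shape == (2, 8) and mask[:, 0].all()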
331
1
'''simple docstring''' from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def a__ ( a__ ): """simple docstring""" if isinstance(a__ , collections.abc.Iterable ): return x return (x, x) @require_tf class lowerCAmelCase__ : """simple docstring""" def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: """simple docstring""" pass def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" pass def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = {"""vision_model""": vision_model, """text_model""": text_model} __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = after_output[0].numpy() __SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-5 ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Any ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model( input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.vision_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.image_size ) __SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.patch_size ) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __SCREAMING_SNAKE_CASE = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __SCREAMING_SNAKE_CASE = output.text_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def UpperCAmelCase__ ( self : Union[str, Any] , 
__SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = np.abs((a - b) ).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , f'Difference between torch and flax is {diff} (>= {tol}).' ) def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_save_load(**__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_pretrained_model_and_inputs() __SCREAMING_SNAKE_CASE = model_a(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model_a(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = after_outputs[0].numpy() __SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1E-5 ) @require_tf class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) __SCREAMING_SNAKE_CASE = 13 __SCREAMING_SNAKE_CASE = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] ) __SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = TFViTModel(__SCREAMING_SNAKE_CASE , name="""vision_model""" ) __SCREAMING_SNAKE_CASE = TFBertModel(__SCREAMING_SNAKE_CASE , name="""text_model""" ) return vision_model, text_model def UpperCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFViTModelTester(self ) __SCREAMING_SNAKE_CASE = TFBertModelTester(self ) __SCREAMING_SNAKE_CASE = vit_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) __SCREAMING_SNAKE_CASE = 13 __SCREAMING_SNAKE_CASE = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] ) __SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model( input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.vision_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.image_size ) __SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.patch_size ) __SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __SCREAMING_SNAKE_CASE = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __SCREAMING_SNAKE_CASE = output.text_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFDeiTModel(__SCREAMING_SNAKE_CASE , name="""vision_model""" ) __SCREAMING_SNAKE_CASE = TFRobertaModel(__SCREAMING_SNAKE_CASE , name="""text_model""" ) return vision_model, text_model def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = 
TFDeiTModelTester(self ) __SCREAMING_SNAKE_CASE = TFRobertaModelTester(self ) __SCREAMING_SNAKE_CASE = vit_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , """hf-internal-testing/tiny-random-bert""" ) __SCREAMING_SNAKE_CASE = 13 __SCREAMING_SNAKE_CASE = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] ) __SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = TFCLIPVisionModel(__SCREAMING_SNAKE_CASE , name="""vision_model""" ) __SCREAMING_SNAKE_CASE = TFBertModel(__SCREAMING_SNAKE_CASE , name="""text_model""" ) return vision_model, text_model def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFCLIPVisionModelTester(self ) __SCREAMING_SNAKE_CASE = TFBertModelTester(self ) __SCREAMING_SNAKE_CASE = clip_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) 
__SCREAMING_SNAKE_CASE = processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) __SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __SCREAMING_SNAKE_CASE = np.array([[1.2284727, 0.3104122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
331
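For orientation, here is a minimal, illustrative sketch of the dual-encoder API that the tests above exercise. It loads the same tiny checkpoints as the fixture and mirrors its dummy-input shapes; it is a sketch, not part of the test suite.

import tensorflow as tf
from transformers import TFVisionTextDualEncoderModel

# Same tiny checkpoints as in the test fixture above.
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
)
cfg = model.vision_model.config
pixel_values = tf.random.uniform((1, cfg.num_channels, cfg.image_size, cfg.image_size))
input_ids = tf.constant([[1, 2, 3, 4]])
attention_mask = tf.ones_like(input_ids)
outputs = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
print(outputs.logits_per_image.shape)  # (num_images, num_texts) similarity logits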
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase__ = 10000 lowerCAmelCase__ = None lowerCAmelCase__ = None class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase__ = ParquetConfig def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) __SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ): __SCREAMING_SNAKE_CASE = data_files if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] __SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) ) return splits def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE = 
pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' ) raise
331
1
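Usage note: this builder backs `load_dataset("parquet", ...)`. A minimal sketch follows; the file path is a placeholder, and `columns`/`batch_size` are assumed to pass through as the ParquetConfig fields defined above.

from datasets import load_dataset

ds = load_dataset(
    "parquet",
    data_files={"train": "data/train.parquet"},  # placeholder path
    columns=["id", "text"],   # optional column projection, validated against the features
    batch_size=1_000,         # rows per Arrow record batch yielded by the builder
    split="train",
)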
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Any = { 'edbeeching/decision-transformer-gym-hopper-medium': ( 'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "decision_transformer" lowerCAmelCase__ = ["past_key_values"] lowerCAmelCase__ = { "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=17 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : str=128 , __SCREAMING_SNAKE_CASE : List[str]=4_096 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : Tuple=1_024 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict="relu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Dict=1E-5 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=50_256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=50_256 , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : str=False , **__SCREAMING_SNAKE_CASE : int , ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = state_dim __SCREAMING_SNAKE_CASE = act_dim __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = max_ep_len __SCREAMING_SNAKE_CASE = action_tanh __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = n_positions __SCREAMING_SNAKE_CASE = n_layer __SCREAMING_SNAKE_CASE = n_head __SCREAMING_SNAKE_CASE = n_inner __SCREAMING_SNAKE_CASE = activation_function __SCREAMING_SNAKE_CASE = resid_pdrop __SCREAMING_SNAKE_CASE = embd_pdrop __SCREAMING_SNAKE_CASE = attn_pdrop __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = scale_attn_weights __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = scale_attn_by_inverse_layer_idx __SCREAMING_SNAKE_CASE = reorder_and_upcast_attn __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id super().__init__(bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
331
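A short instantiation sketch for the config above; the defaults (`state_dim=17`, `act_dim=4`) match the Gym Hopper checkpoints named in the archive map, and `DecisionTransformerModel` is assumed to be the matching model class in the same library.

from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
model = DecisionTransformerModel(config)  # randomly initialized, ready for offline-RL training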
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase : Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] UpperCAmelCase : Optional[Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] UpperCAmelCase : Optional[int] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[str] = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def a__ ( a__ , a__ ): """simple docstring""" for tf_name, hf_name in patterns: __SCREAMING_SNAKE_CASE = k.replace(a__ , a__ ) return k def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ ) __SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ ) __SCREAMING_SNAKE_CASE = torch_model.state_dict() __SCREAMING_SNAKE_CASE = {} # separating decoder weights __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = DECODER_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = REMAINING_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""] __SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ ) __SCREAMING_SNAKE_CASE = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = ["""global_step"""] for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ): __SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name ) if skip_key: continue __SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ ) __SCREAMING_SNAKE_CASE = array return tf_weights def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ ) __SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase : int = parser.parse_args() UpperCAmelCase : Dict = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
331
1
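For reference, a hypothetical invocation of the conversion script above; the script filename is an assumption, while the two flags come directly from its argparse definition.

import subprocess

subprocess.run([
    "python", "convert_bigbird_pegasus_tf_to_pytorch.py",  # assumed filename
    "--tf_ckpt_path", "/path/to/tf_ckpt",                  # placeholder checkpoint path
    "--save_dir", "./bigbird-pegasus-converted",
], check=True)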
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = (DDPMScheduler,) def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = { """num_train_timesteps""": 1_000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**__SCREAMING_SNAKE_CASE ) return config def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. 
predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" ) __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ): if i == len(__SCREAMING_SNAKE_CASE ) - 1: __SCREAMING_SNAKE_CASE = -1 else: __SCREAMING_SNAKE_CASE = timesteps[i + 1] __SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of 
`num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.num_train_timesteps`: {scheduler.config.num_train_timesteps}""" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
331
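Outside the test harness, the denoising loop these tests exercise looks roughly like the sketch below. The `model` is a stand-in for a trained noise predictor, so this only demonstrates the scheduler mechanics.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
sample = torch.randn(1, 3, 8, 8)          # stand-in noisy sample
model = lambda x, t: torch.zeros_like(x)  # placeholder noise predictor
generator = torch.manual_seed(0)
for t in scheduler.timesteps:  # 999 .. 0
    residual = model(sample, t)                                                    # 1. predict noise
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample  # 2. x_t -> x_{t-1}
print(sample.shape)  # torch.Size([1, 3, 8, 8])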
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} if prompt is not None: __SCREAMING_SNAKE_CASE = prompt if generate_kwargs is not None: __SCREAMING_SNAKE_CASE = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __SCREAMING_SNAKE_CASE = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __SCREAMING_SNAKE_CASE = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __SCREAMING_SNAKE_CASE = self.model.config.model_type if model_type == "git": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __SCREAMING_SNAKE_CASE = None return model_inputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __SCREAMING_SNAKE_CASE = None if generate_kwargs is None: __SCREAMING_SNAKE_CASE = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name ) __SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs: __SCREAMING_SNAKE_CASE = { """generated_text""": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE ) return records
331
1
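In practice this class is reached through the `pipeline` factory. A brief sketch; the checkpoint name is illustrative, and the image path reuses the COCO fixture referenced elsewhere in this file.

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
print(captioner("./tests/fixtures/tests_samples/COCO/000000039769.png"))
# Models that support conditioning (e.g. GIT, Pix2Struct) also accept a prompt:
# captioner("./tests/fixtures/tests_samples/COCO/000000039769.png", prompt="a photography of", max_new_tokens=20)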
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Union[str, Any] = { 'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MegatronBertForCausalLM', 'MegatronBertForMaskedLM', 'MegatronBertForMultipleChoice', 'MegatronBertForNextSentencePrediction', 'MegatronBertForPreTraining', 'MegatronBertForQuestionAnswering', 'MegatronBertForSequenceClassification', 'MegatronBertForTokenClassification', 'MegatronBertModel', 'MegatronBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
331
'''simple docstring''' def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = len(a__ ) while cur > 1: # Find the maximum number in arr __SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi __SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(a__ )] # Reverse whole list __SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(a__ )] cur -= 1 return arr if __name__ == "__main__": UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase : str = [int(item) for item in user_input.split(',')] print(pancake_sort(unsorted))
331
1
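A short worked trace of the flip logic above, for the input `[3, 1, 2]`:

# cur=3: max of [3, 1, 2] sits at index 0, so the first flip (arr[0::-1]) is a no-op;
#        reversing the first 3 elements then gives [2, 1, 3]
# cur=2: max of [2, 1] sits at index 0; reversing the first 2 elements gives [1, 2, 3]
print(pancake_sort([3, 1, 2]))  # [1, 2, 3]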
'''simple docstring''' import os def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = os.path.join(os.path.dirname(a__ ) , """num.txt""" ) with open(a__ ) as file_hand: return str(sum(int(a__ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
331
'''simple docstring''' import os # Precomputes a list of the 100 first triangular numbers UpperCAmelCase : int = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) ) __SCREAMING_SNAKE_CASE = os.path.join(a__ , """words.txt""" ) __SCREAMING_SNAKE_CASE = """""" with open(a__ ) as f: __SCREAMING_SNAKE_CASE = f.readline() __SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] __SCREAMING_SNAKE_CASE = [ word for word in [sum(ord(a__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(a__ ) if __name__ == "__main__": print(solution())
331
1
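A quick check of the word-value rule used above: `ord(ch) - 64` maps each uppercase letter to its alphabet position, so "SKY" scores 19 + 11 + 25 = 55, the 10th triangular number.

print(sum(ord(ch) - 64 for ch in "SKY"))  # 55 == 10 * 11 // 2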
'''simple docstring''' import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase : Any = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = XLMProphetNetTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = True def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = """[PAD]""" __SCREAMING_SNAKE_CASE = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """[PAD]""" ) self.assertEqual(vocab_keys[1] , """[CLS]""" ) self.assertEqual(vocab_keys[-1] , """j""" ) self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , 1_012 ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_012 ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ) self.assertListEqual( __SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """[UNK]""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """[UNK]""", """.""", ] 
, ) @cached_property def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" return XLMProphetNetTokenizer.from_pretrained("""microsoft/xprophetnet-large-wiki100-cased""" ) @slow def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """Hello World!""" __SCREAMING_SNAKE_CASE = [35_389, 6_672, 49, 2] self.assertListEqual(__SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(__SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {"""input_ids""": [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__SCREAMING_SNAKE_CASE , model_name="""microsoft/xprophetnet-large-wiki100-cased""" , revision="""1acad1643ddd54a44df6a1b797ada8373685d90e""" , )
331
'''simple docstring''' class lowerCAmelCase__ : # Public class to implement a graph """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = row __SCREAMING_SNAKE_CASE = col __SCREAMING_SNAKE_CASE = graph def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> bool: """simple docstring""" return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __SCREAMING_SNAKE_CASE = [-1, 0, 1, -1, 1, -1, 0, 1] __SCREAMING_SNAKE_CASE = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> int: # And finally, count all islands. """simple docstring""" __SCREAMING_SNAKE_CASE = [[False for j in range(self.COL )] for i in range(self.ROW )] __SCREAMING_SNAKE_CASE = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) count += 1 return count
331
1
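Because the obfuscation above renames every method to the same identifier (shadowing the `is_safe` and `diffs` call sites), here is a self-contained restatement of the intended 8-way flood fill with readable names, checked against the classic example grid:

def count_islands(grid):
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i, j):
        visited[i][j] = True
        for di in (-1, 0, 1):  # visit all 8 neighbours
            for dj in (-1, 0, 1):
                ni, nj = i + di, j + dj
                if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not visited[ni][nj]:
                    dfs(ni, nj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:  # unvisited land starts a new island
                dfs(i, j)
                count += 1
    return count

print(count_islands([[1, 1, 0, 0, 0],
                     [0, 1, 0, 0, 1],
                     [1, 0, 0, 1, 1],
                     [0, 0, 0, 0, 0],
                     [1, 0, 1, 0, 1]]))  # 5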
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def a__ ( a__ , a__ , a__ , a__ , a__ = None , a__ = None , a__ = None , ): """simple docstring""" if config_name_or_path is None: __SCREAMING_SNAKE_CASE = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: __SCREAMING_SNAKE_CASE = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: __SCREAMING_SNAKE_CASE = question_encoder_name_or_path __SCREAMING_SNAKE_CASE = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. __SCREAMING_SNAKE_CASE = RagConfig.from_pretrained(a__ ) __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a__ ) __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(a__ ) __SCREAMING_SNAKE_CASE = gen_config __SCREAMING_SNAKE_CASE = question_encoder_config __SCREAMING_SNAKE_CASE = model_class.from_pretrained_question_encoder_generator( a__ , a__ , config=a__ ) rag_model.save_pretrained(a__ ) # Sanity check. model_class.from_pretrained(a__ ) # Save tokenizers. __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a__ ) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""" ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a__ ) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""" ) if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) UpperCAmelCase : Optional[int] = parser.parse_args() UpperCAmelCase : Dict = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
331
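A hypothetical invocation of the consolidation script above; the filename is assumed, the flags come from its argparse definition, and the two checkpoints are standard public models.

import subprocess

subprocess.run([
    "python", "consolidate_rag_checkpoint.py",  # assumed filename
    "--model_type", "rag_sequence",
    "--generator_name_or_path", "facebook/bart-large-cnn",
    "--question_encoder_name_or_path", "facebook/dpr-question_encoder-single-nq-base",
    "--dest", "./rag-consolidated",
], check=True)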
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_choices def UpperCAmelCase__ ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_attention_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self ) @slow def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_000 __SCREAMING_SNAKE_CASE = (1, 6, vocab_size) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class lowerCAmelCase__ : """simple docstring""" lowerCAmelCase__ = MBartConfig lowerCAmelCase__ = {} lowerCAmelCase__ = "gelu" def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=13 , __SCREAMING_SNAKE_CASE : Optional[int]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : List[str]=32 , __SCREAMING_SNAKE_CASE : Tuple=2 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=20 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=1 , __SCREAMING_SNAKE_CASE : List[str]=0 , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return config, inputs_dict def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__SCREAMING_SNAKE_CASE ).get_decoder() 
__SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def a__ ( a__ , a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , ): """simple docstring""" if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(a__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ = ( { "conversational": TFMBartForConditionalGeneration, "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__SCREAMING_SNAKE_CASE ) @require_sentencepiece @require_tokenizers @require_tf class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = [ " UN Chief Says There Is No Military Solution in Syria", ] lowerCAmelCase__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] lowerCAmelCase__ = "facebook/mbart-large-en-ro" @cached_property def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self : Optional[int] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def UpperCAmelCase__ ( self : List[str] , **__SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__SCREAMING_SNAKE_CASE ) self.assertListEqual(self.expected_text , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__SCREAMING_SNAKE_CASE , return_tensors="""tf""" ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) return generated_words @slow def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" self._assert_generated_batch_equal_expected()
331
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "markuplm" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , __SCREAMING_SNAKE_CASE : Dict=216 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_001 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=50 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple: """simple docstring""" super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout # additional properties __SCREAMING_SNAKE_CASE = max_depth __SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings __SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings __SCREAMING_SNAKE_CASE = tag_pad_id __SCREAMING_SNAKE_CASE = subs_pad_id __SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
331
1
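A minimal instantiation sketch: with no arguments, the defaults above reproduce the `microsoft/markuplm-base` hyperparameters; `MarkupLMModel` is assumed to be the matching model class in the same library.

from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig()      # defaults mirror microsoft/markuplm-base
model = MarkupLMModel(config)  # randomly initialized
print(config.max_depth, config.xpath_unit_hidden_size)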
'''simple docstring''' import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase : int = logging.get_logger(__name__) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "vision-encoder-decoder" lowerCAmelCase__ = True def __init__( self : Any , **__SCREAMING_SNAKE_CASE : Dict ) -> Any: """simple docstring""" super().__init__(**__SCREAMING_SNAKE_CASE ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'A configuration of type {self.model_type} cannot be instantiated because ' f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' ) __SCREAMING_SNAKE_CASE = kwargs.pop("""encoder""" ) __SCREAMING_SNAKE_CASE = encoder_config.pop("""model_type""" ) __SCREAMING_SNAKE_CASE = kwargs.pop("""decoder""" ) __SCREAMING_SNAKE_CASE = decoder_config.pop("""model_type""" ) __SCREAMING_SNAKE_CASE = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = AutoConfig.for_model(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = True @classmethod def UpperCAmelCase__ ( cls : int , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , **__SCREAMING_SNAKE_CASE : Tuple ) -> PretrainedConfig: """simple docstring""" logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = self.encoder.to_dict() __SCREAMING_SNAKE_CASE = self.decoder.to_dict() __SCREAMING_SNAKE_CASE = self.__class__.model_type return output class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = version.parse("1.11" ) @property def UpperCAmelCase__ ( self : str ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def UpperCAmelCase__ ( self : List[str] ) -> float: """simple docstring""" return 1E-4 @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class lowerCAmelCase__ ( a ): """simple docstring""" @property def UpperCAmelCase__ ( self : Any ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" __SCREAMING_SNAKE_CASE = OrderedDict() __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : "PreTrainedTokenizerBase" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ) -> Mapping[str, Any]: """simple docstring""" 
import torch __SCREAMING_SNAKE_CASE = OrderedDict() __SCREAMING_SNAKE_CASE = super().generate_dummy_inputs( __SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , is_pair=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dummy_input["""input_ids"""].shape __SCREAMING_SNAKE_CASE = (batch, encoder_sequence, self._config.encoder_hidden_size) __SCREAMING_SNAKE_CASE = dummy_input.pop("""input_ids""" ) __SCREAMING_SNAKE_CASE = dummy_input.pop("""attention_mask""" ) __SCREAMING_SNAKE_CASE = torch.zeros(__SCREAMING_SNAKE_CASE ) return common_inputs class lowerCAmelCase__ ( a ): """simple docstring""" @property def UpperCAmelCase__ ( self : str ) -> None: """simple docstring""" pass def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : PretrainedConfig ) -> OnnxConfig: """simple docstring""" return VisionEncoderDecoderEncoderOnnxConfig(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : PretrainedConfig , __SCREAMING_SNAKE_CASE : str = "default" ) -> OnnxConfig: """simple docstring""" __SCREAMING_SNAKE_CASE = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
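For orientation, a short usage sketch of how such a composed configuration is typically built from two sub-configurations; the concrete encoder/decoder choices below are illustrative assumptions, not taken from the source.

# Minimal usage sketch (assumes the `transformers` package is installed).
from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()   # any vision encoder config would do
decoder_config = BertConfig()  # any text decoder config would do

# from_encoder_decoder_configs flips the decoder into cross-attention mode
# before composing the two sub-configs into one config object.
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention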
331
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
331
1
def is_automorphic_number(number: int) -> bool:
    """Return True if `number` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square, one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
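A few hand-verified cases for the digit-by-digit test above (the function name is a reconstruction of the mangled original):

assert is_automorphic_number(5)      # 5 * 5 = 25, ends in 5
assert is_automorphic_number(6)      # 6 * 6 = 36, ends in 6
assert is_automorphic_number(76)     # 76 * 76 = 5776, ends in 76
assert not is_automorphic_number(7)  # 7 * 7 = 49, does not end in 7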
331
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers; it is also initialized only from integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One branch must not be a complete subset of another: with [[1, 2], [1, 2, 3, 4]]
        # it would be ambiguous whether generating [1, 2, 3] fulfills the constraint.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
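For context, a hedged sketch of how this constraint is used with constrained beam search; the checkpoint and phrases below are illustrative placeholders, not from the source.

# Illustrative sketch: force the output to contain one of several phrasings.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

phrases = ["rained", "raining"]
token_ids = [tokenizer(p, add_special_tokens=False).input_ids for p in phrases]
constraint = DisjunctiveConstraint(token_ids)

inputs = tokenizer("translate English to German: it rained", return_tensors="pt")
# Constrained generation requires beam search (num_beams > 1).
out = model.generate(**inputs, constraints=[constraint], num_beams=4)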
331
1
'''simple docstring''' import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class lowerCAmelCase__ ( a ): """simple docstring""" def __get__( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Optional[Any]: """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError("""unreadable attribute""" ) __SCREAMING_SNAKE_CASE = """__cached_""" + self.fget.__name__ __SCREAMING_SNAKE_CASE = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if cached is None: __SCREAMING_SNAKE_CASE = self.fget(__SCREAMING_SNAKE_CASE ) setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return cached def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'invalid truth value {val!r}' ) def a__ ( a__ ): """simple docstring""" if is_torch_fx_proxy(a__ ): return True if is_torch_available(): import torch if isinstance(a__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(a__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(a__ , (jnp.ndarray, Tracer) ): return True return isinstance(a__ , np.ndarray ) def a__ ( a__ ): """simple docstring""" return isinstance(a__ , np.ndarray ) def a__ ( a__ ): """simple docstring""" return _is_numpy(a__ ) def a__ ( a__ ): """simple docstring""" import torch return isinstance(a__ , torch.Tensor ) def a__ ( a__ ): """simple docstring""" return False if not is_torch_available() else _is_torch(a__ ) def a__ ( a__ ): """simple docstring""" import torch return isinstance(a__ , torch.device ) def a__ ( a__ ): """simple docstring""" return False if not is_torch_available() else _is_torch_device(a__ ) def a__ ( a__ ): """simple docstring""" import torch if isinstance(a__ , a__ ): if hasattr(a__ , a__ ): __SCREAMING_SNAKE_CASE = getattr(a__ , a__ ) else: return False return isinstance(a__ , torch.dtype ) def a__ ( a__ ): """simple docstring""" return False if not is_torch_available() else _is_torch_dtype(a__ ) def a__ ( a__ ): """simple docstring""" import tensorflow as tf return isinstance(a__ , tf.Tensor ) def a__ ( a__ ): """simple docstring""" return False if not is_tf_available() else _is_tensorflow(a__ ) def a__ ( a__ ): """simple docstring""" import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(a__ , """is_symbolic_tensor""" ): return tf.is_symbolic_tensor(a__ ) return type(a__ ) == tf.Tensor def a__ ( a__ ): """simple docstring""" return False if not is_tf_available() else _is_tf_symbolic_tensor(a__ ) def a__ ( a__ ): """simple docstring""" import jax.numpy as jnp # noqa: F811 return isinstance(a__ , jnp.ndarray ) def a__ ( a__ ): """simple docstring""" return False if not is_flax_available() else _is_jax(a__ ) def a__ ( a__ ): """simple docstring""" if isinstance(a__ , (dict, UserDict) ): return {k: to_py_obj(a__ ) for k, v in obj.items()} 
elif isinstance(a__ , (list, tuple) ): return [to_py_obj(a__ ) for o in obj] elif is_tf_tensor(a__ ): return obj.numpy().tolist() elif is_torch_tensor(a__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(a__ ): return np.asarray(a__ ).tolist() elif isinstance(a__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def a__ ( a__ ): """simple docstring""" if isinstance(a__ , (dict, UserDict) ): return {k: to_numpy(a__ ) for k, v in obj.items()} elif isinstance(a__ , (list, tuple) ): return np.array(a__ ) elif is_tf_tensor(a__ ): return obj.numpy() elif is_torch_tensor(a__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(a__ ): return np.asarray(a__ ) else: return obj class lowerCAmelCase__ ( a ): """simple docstring""" def UpperCAmelCase__ ( self : Dict ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = fields(self ) # Safety and consistency checks if not len(__SCREAMING_SNAKE_CASE ): raise ValueError(f'{self.__class__.__name__} has no fields.' ) if not all(field.default is None for field in class_fields[1:] ): raise ValueError(f'{self.__class__.__name__} should not have more than one required field.' ) __SCREAMING_SNAKE_CASE = getattr(self , class_fields[0].name ) __SCREAMING_SNAKE_CASE = all(getattr(self , field.name ) is None for field in class_fields[1:] ) if other_fields_are_none and not is_tensor(__SCREAMING_SNAKE_CASE ): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = first_field.items() __SCREAMING_SNAKE_CASE = True else: try: __SCREAMING_SNAKE_CASE = iter(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = True except TypeError: __SCREAMING_SNAKE_CASE = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__SCREAMING_SNAKE_CASE ): if ( not isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) or not len(__SCREAMING_SNAKE_CASE ) == 2 or not isinstance(element[0] , __SCREAMING_SNAKE_CASE ) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute __SCREAMING_SNAKE_CASE = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'Cannot set key/value for {element}. It needs to be a tuple (key, value).' ) break setattr(self , element[0] , element[1] ) if element[1] is not None: __SCREAMING_SNAKE_CASE = element[1] elif first_field is not None: __SCREAMING_SNAKE_CASE = first_field else: for field in class_fields: __SCREAMING_SNAKE_CASE = getattr(self , field.name ) if v is not None: __SCREAMING_SNAKE_CASE = v def __delitem__( self : List[Any] , *__SCREAMING_SNAKE_CASE : Tuple , **__SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: """simple docstring""" raise Exception(f'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' ) def UpperCAmelCase__ ( self : Dict , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]: """simple docstring""" raise Exception(f'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' ) def UpperCAmelCase__ ( self : Tuple , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]: """simple docstring""" raise Exception(f'You cannot use ``pop`` on a {self.__class__.__name__} instance.' 
) def UpperCAmelCase__ ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: """simple docstring""" raise Exception(f'You cannot use ``update`` on a {self.__class__.__name__} instance.' ) def __getitem__( self : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> str: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = dict(self.items() ) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def __setitem__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple ) -> Union[str, Any]: """simple docstring""" super().__setitem__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Dict ) -> Tuple[Any]: """simple docstring""" return tuple(self[k] for k in self.keys() ) class lowerCAmelCase__ ( a , a ): """simple docstring""" @classmethod def UpperCAmelCase__ ( cls : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" raise ValueError( f'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' ) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "longest" lowerCAmelCase__ = "max_length" lowerCAmelCase__ = "do_not_pad" class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "pt" lowerCAmelCase__ = "tf" lowerCAmelCase__ = "np" lowerCAmelCase__ = "jax" class lowerCAmelCase__ : """simple docstring""" def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[ContextManager] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = context_managers __SCREAMING_SNAKE_CASE = ExitStack() def __enter__( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(__SCREAMING_SNAKE_CASE ) def __exit__( self : List[str] , *__SCREAMING_SNAKE_CASE : int , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple: """simple docstring""" self.stack.__exit__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = infer_framework(a__ ) if framework == "tf": __SCREAMING_SNAKE_CASE = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __SCREAMING_SNAKE_CASE = inspect.signature(model_class.forward ) # PyTorch models else: __SCREAMING_SNAKE_CASE = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = model_class.__name__ __SCREAMING_SNAKE_CASE = infer_framework(a__ ) if framework == "tf": __SCREAMING_SNAKE_CASE = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": __SCREAMING_SNAKE_CASE = inspect.signature(model_class.forward ) # PyTorch models else: __SCREAMING_SNAKE_CASE = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" 
in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def a__ ( a__ , a__ = "" , a__ = "." ): """simple docstring""" def _flatten_dict(a__ , a__="" , a__="." ): for k, v in d.items(): __SCREAMING_SNAKE_CASE = str(a__ ) + delimiter + str(a__ ) if parent_key else k if v and isinstance(a__ , a__ ): yield from flatten_dict(a__ , a__ , delimiter=a__ ).items() else: yield key, v return dict(_flatten_dict(a__ , a__ , a__ ) ) @contextmanager def a__ ( a__ , a__ = False ): """simple docstring""" if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def a__ ( a__ , a__=None ): """simple docstring""" if is_numpy_array(a__ ): return np.transpose(a__ , axes=a__ ) elif is_torch_tensor(a__ ): return array.T if axes is None else array.permute(*a__ ) elif is_tf_tensor(a__ ): import tensorflow as tf return tf.transpose(a__ , perm=a__ ) elif is_jax_tensor(a__ ): return jnp.transpose(a__ , axes=a__ ) else: raise ValueError(F'Type not supported for transpose: {type(a__ )}.' ) def a__ ( a__ , a__ ): """simple docstring""" if is_numpy_array(a__ ): return np.reshape(a__ , a__ ) elif is_torch_tensor(a__ ): return array.reshape(*a__ ) elif is_tf_tensor(a__ ): import tensorflow as tf return tf.reshape(a__ , a__ ) elif is_jax_tensor(a__ ): return jnp.reshape(a__ , a__ ) else: raise ValueError(F'Type not supported for reshape: {type(a__ )}.' ) def a__ ( a__ , a__=None ): """simple docstring""" if is_numpy_array(a__ ): return np.squeeze(a__ , axis=a__ ) elif is_torch_tensor(a__ ): return array.squeeze() if axis is None else array.squeeze(dim=a__ ) elif is_tf_tensor(a__ ): import tensorflow as tf return tf.squeeze(a__ , axis=a__ ) elif is_jax_tensor(a__ ): return jnp.squeeze(a__ , axis=a__ ) else: raise ValueError(F'Type not supported for squeeze: {type(a__ )}.' ) def a__ ( a__ , a__ ): """simple docstring""" if is_numpy_array(a__ ): return np.expand_dims(a__ , a__ ) elif is_torch_tensor(a__ ): return array.unsqueeze(dim=a__ ) elif is_tf_tensor(a__ ): import tensorflow as tf return tf.expand_dims(a__ , axis=a__ ) elif is_jax_tensor(a__ ): return jnp.expand_dims(a__ , axis=a__ ) else: raise ValueError(F'Type not supported for expand_dims: {type(a__ )}.' ) def a__ ( a__ ): """simple docstring""" if is_numpy_array(a__ ): return np.size(a__ ) elif is_torch_tensor(a__ ): return array.numel() elif is_tf_tensor(a__ ): import tensorflow as tf return tf.size(a__ ) elif is_jax_tensor(a__ ): return array.size else: raise ValueError(F'Type not supported for expand_dims: {type(a__ )}.' 
) def a__ ( a__ , a__ ): """simple docstring""" for key, value in auto_map.items(): if isinstance(a__ , (tuple, list) ): __SCREAMING_SNAKE_CASE = [F'{repo_id}--{v}' if (v is not None and """--""" not in v) else v for v in value] elif value is not None and "--" not in value: __SCREAMING_SNAKE_CASE = F'{repo_id}--{value}' return auto_map def a__ ( a__ ): """simple docstring""" for base_class in inspect.getmro(a__ ): __SCREAMING_SNAKE_CASE = base_class.__module__ __SCREAMING_SNAKE_CASE = base_class.__name__ if module.startswith("""tensorflow""" ) or module.startswith("""keras""" ) or name == "TFPreTrainedModel": return "tf" elif module.startswith("""torch""" ) or name == "PreTrainedModel": return "pt" elif module.startswith("""flax""" ) or module.startswith("""jax""" ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'Could not infer framework from class {model_class}.' )
331
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = relative_attention __SCREAMING_SNAKE_CASE = position_biased_input __SCREAMING_SNAKE_CASE = pos_att_type __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
331
1
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Net present value of a series of cash flows at the given discount rate."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
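A worked example for the function above, with the arithmetic spelled out (values computed by hand):

# -1000 today, then 500 per year for three years at a 10% discount rate:
#   -1000/1.1**0 + 500/1.1**1 + 500/1.1**2 + 500/1.1**3
# = -1000 + 454.5455 + 413.2231 + 375.6574 ≈ 243.43
assert present_value(0.10, [-1000, 500, 500, 500]) == 243.43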
331
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """Print the first- and second-order Shannon entropies (in bits) of `text`,
    and the difference between them."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Convert the text into two dicts of counts: one for single characters
    and one for two-character sequences."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
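A small hand-checked example for the two entropies above (counts and logarithms verified by hand; the rounding follows the print statements):

# For text = "aab": analyze_text counts {'a': 2, 'b': 1} and {' a': 1, 'aa': 1, 'ab': 1}.
# H1 = -(2/3 * log2(2/3) + 1/3 * log2(1/3)) ≈ 0.918 -> printed as 1.0
# H2 = -(3 * 1/3 * log2(1/3)) = log2(3)     ≈ 1.585 -> printed as 2.0
# difference ≈ 0.667                                -> printed as 1.0
calculate_prob("aab")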
331
1
import argparse
import struct
import unittest


class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
            0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
            0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
            0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
            0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
            0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
            0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
            0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
            0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (words[index - 16] + s0 + words[index - 7] + s1) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xffffffff) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xffffffff & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
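A quick sanity check of the implementation above against hashlib; the expected value is the well-known SHA-256 test vector for the empty message:

import hashlib

assert SHA256(b"").hash == hashlib.sha256(b"").hexdigest()
# == "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"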
331
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def a__ ( a__ ): """simple docstring""" return x + 2 class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) __SCREAMING_SNAKE_CASE = """x = y""" __SCREAMING_SNAKE_CASE = {"""y""": 5} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} ) def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = add_two(x)""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3\ny = 5""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. 
assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} ) __SCREAMING_SNAKE_CASE = {"""x""": 8} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = x""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} ) def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE ) assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
331
1
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase : int = logging.get_logger(__name__) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = ["pixel_values"] def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : Optional[Any]=PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = size_divisor __SCREAMING_SNAKE_CASE = resample super().__init__(**__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> np.ndarray: """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_image_size(__SCREAMING_SNAKE_CASE ) # Rounds the height and width down to the closest multiple of size_divisor __SCREAMING_SNAKE_CASE = height // size_divisor * size_divisor __SCREAMING_SNAKE_CASE = width // size_divisor * size_divisor __SCREAMING_SNAKE_CASE = resize(__SCREAMING_SNAKE_CASE , (new_h, new_w) , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return image def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = None , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> np.ndarray: """simple docstring""" return rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[Union[TensorType, str]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : int , ) -> BatchFeature: """simple docstring""" __SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE = size_divisor if size_divisor is not None else self.size_divisor __SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError("""size_divisor is required for resizing""" ) __SCREAMING_SNAKE_CASE = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError("""Invalid image(s)""" ) # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for img in images] if do_resize: __SCREAMING_SNAKE_CASE = [self.resize(__SCREAMING_SNAKE_CASE , size_divisor=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE = [self.rescale(__SCREAMING_SNAKE_CASE , scale=1 / 255 ) for image in images] __SCREAMING_SNAKE_CASE = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images] __SCREAMING_SNAKE_CASE = {"""pixel_values""": images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE )
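The resize step in the preprocessor above rounds each image side down to the nearest multiple of size_divisor before interpolating; a quick check of that arithmetic (the concrete sizes are made-up examples):

# height, width = 518, 389 with size_divisor = 32:
#   518 // 32 * 32 = 512 and 389 // 32 * 32 = 384,
# so the image is resized to (512, 384) before the 1/255 rescaling.
assert 518 // 32 * 32 == 512
assert 389 // 32 * 32 == 384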
331
import os


def solution(filename: str = "input.txt") -> int:
    """Find the minimal path sum from the left column to the right column of the
    matrix in `filename`, moving up, down, and right (Project Euler problem 82)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # move right from the previous column
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        # relax downward moves within the column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        # relax upward moves within the column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
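The file-based solution above hides the core recurrence; here is a hand-checked sketch of the same three-pass column relaxation on an in-memory matrix (the helper name is hypothetical, not in the source):

def minimal_path_sum(matrix: list[list[int]]) -> int:
    rows, cols = len(matrix), len(matrix[0])
    sums = [[row[0]] + [0] * (cols - 1) for row in matrix]
    for j in range(1, cols):
        for i in range(rows):  # move right
            sums[i][j] = sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):  # relax downward moves
            sums[i][j] = min(sums[i][j], sums[i - 1][j] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax upward moves
            sums[i][j] = min(sums[i][j], sums[i + 1][j] + matrix[i][j])
    return min(row[-1] for row in sums)

# For [[1, 9], [5, 1]] the cheapest left-to-right path is 5 -> 1 with cost 6:
# the right-move pass gives column sums [10, 6], and neither vertical
# relaxation improves them, so the answer is min(10, 6) = 6.
assert minimal_path_sum([[1, 9], [5, 1]]) == 6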
331
1
'''simple docstring'''

import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser

UpperCAmelCase : Optional[int] = re.compile(R'\s+')


def a__ ( a__ ):
    """simple docstring"""
    return {"hash": hashlib.mda(re.sub(a__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = [len(a__ ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(a__ ), "line_max": max(a__ )}


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}


def a__ ( a__ , a__ ):
    """simple docstring"""
    if example["hash"] in uniques:
        uniques.remove(example["""hash"""] )
        return True
    else:
        return False


def a__ ( a__ , a__=5 ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    __SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
    for _, line in zip(range(a__ ) , a__ ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def a__ ( a__ , a__=5 , a__=0.05 ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = ["""unit tests""", """test file""", """configuration file"""]
    __SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
    __SCREAMING_SNAKE_CASE = 0
    __SCREAMING_SNAKE_CASE = 0
    # first test
    for _, line in zip(range(a__ ) , a__ ):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    __SCREAMING_SNAKE_CASE = example["""content"""].count("""\n""" )
    __SCREAMING_SNAKE_CASE = int(coeff * nlines )
    for line in lines:
        count_config += line.lower().count("""config""" )
        count_test += line.lower().count("""test""" )
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = ["""def """, """class """, """for """, """while """]
    __SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def a__ ( a__ , a__=4 ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
    __SCREAMING_SNAKE_CASE = 0
    for line in lines:
        counter += line.lower().count("""=""" )
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = tokenizer(example["""content"""] , truncation=a__ )["""input_ids"""]
    __SCREAMING_SNAKE_CASE = len(example["""content"""] ) / len(a__ )
    return {"ratio": ratio}


def a__ ( a__ ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = {}
    results.update(get_hash(a__ ) )
    results.update(line_stats(a__ ) )
    results.update(alpha_stats(a__ ) )
    results.update(char_token_ratio(a__ ) )
    results.update(is_autogenerated(a__ ) )
    results.update(is_config_or_test(a__ ) )
    results.update(has_no_keywords(a__ ) )
    results.update(has_few_assignments(a__ ) )
    return results


def a__ ( a__ , a__ , a__ ):
    """simple docstring"""
    if not check_uniques(a__ , a__ ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def a__ ( a__ ):
    """simple docstring"""
    with open(a__ , """rb""" ) as f_in:
        with gzip.open(str(a__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(a__ , a__ )
    os.unlink(a__ )


# Settings
UpperCAmelCase : Dict = HfArgumentParser(PreprocessingArguments)
UpperCAmelCase : Optional[int] = parser.parse_args()
if args.num_workers is None:
    UpperCAmelCase : int = multiprocessing.cpu_count()
UpperCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
UpperCAmelCase : List[Any] = time.time()
UpperCAmelCase : str = load_dataset(args.dataset_name, split='train')
print(f"""Time to load dataset: {time.time()-t_start:.2f}""")

# Run preprocessing
UpperCAmelCase : int = time.time()
UpperCAmelCase : List[Any] = ds.map(preprocess, num_proc=args.num_workers)
print(f"""Time to preprocess dataset: {time.time()-t_start:.2f}""")

# Deduplicate hashes
UpperCAmelCase : Optional[Any] = set(ds.unique('hash'))
UpperCAmelCase : Dict = len(uniques) / len(ds)
print(f"""Fraction of duplicates: {1-frac:.2%}""")

# Deduplicate data and apply heuristics
UpperCAmelCase : Optional[int] = time.time()
UpperCAmelCase : Tuple = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(f"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(f"""Size of filtered dataset: {len(ds_filter)}""")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    UpperCAmelCase : Dict = time.time()
    UpperCAmelCase , UpperCAmelCase : Tuple = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
    print(f"""Size of deduplicate dataset: {len(ds_filter)}""")

# Save data in batches of samples_per_file
UpperCAmelCase : Optional[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / 'duplicate_clusters.json', 'w') as f:
        json.dump(duplicate_clusters, f)

UpperCAmelCase : Union[str, Any] = output_dir / 'data'
data_dir.mkdir(exist_ok=True)

UpperCAmelCase : str = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    UpperCAmelCase : str = str(data_dir / f"""file-{file_number+1:012}.json""")
    UpperCAmelCase : List[str] = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"""Time to save dataset: {time.time()-t_start:.2f}""")
331
'''simple docstring'''

import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version

UpperCAmelCase : Any = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

UpperCAmelCase : Optional[Any] = {
    'base': AutoModel,
    'sequence-classification': AutoModelForSequenceClassification,
    'question-answering': AutoModelForQuestionAnswering,
    'pretraining': AutoModelForPreTraining,
    'token-classification': AutoModelForTokenClassification,
    'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeqaSeqLM,
    'translation': AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
UpperCAmelCase : Dict = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,  # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys())
UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}'


class lowerCAmelCase__ ( pl.LightningModule ):
    """simple docstring"""

    def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any:
        """simple docstring"""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = 0
        __SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir )
        __SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,
                **({"""num_labels""": num_labels} if num_labels is not None else {}) ,
                cache_dir=__SCREAMING_SNAKE_CASE ,
                **__SCREAMING_SNAKE_CASE ,
            )
        else:
            __SCREAMING_SNAKE_CASE = config
        __SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
        for p in extra_model_params:
            if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
                assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute'
                setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) )
        if tokenizer is None:
            __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,
                cache_dir=__SCREAMING_SNAKE_CASE ,
            )
        else:
            __SCREAMING_SNAKE_CASE = tokenizer
        __SCREAMING_SNAKE_CASE = MODEL_MODES[mode]
        if model is None:
            __SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(
                self.hparams.model_name_or_path ,
                from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) ,
                config=self.config ,
                cache_dir=__SCREAMING_SNAKE_CASE ,
            )
        else:
            __SCREAMING_SNAKE_CASE = model

    def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : List[Any] ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler]
        __SCREAMING_SNAKE_CASE = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        __SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1}
        return scheduler

    def UpperCAmelCase__ ( self : int ) -> Optional[int]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model
        __SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""]
        __SCREAMING_SNAKE_CASE = [
            {
                """params""": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                """weight_decay""": self.hparams.weight_decay,
            },
            {
                """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                """weight_decay""": 0.0,
            },
        ]
        if self.hparams.adafactor:
            __SCREAMING_SNAKE_CASE = Adafactor(
                __SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE )
        else:
            __SCREAMING_SNAKE_CASE = AdamW(
                __SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        __SCREAMING_SNAKE_CASE = optimizer
        __SCREAMING_SNAKE_CASE = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
        """simple docstring"""
        return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
        """simple docstring"""
        return self.validation_end(__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Tuple ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        __SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
        """simple docstring"""
        if stage == "test":
            __SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset )
        else:
            __SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset )

    def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int:
        """simple docstring"""
        raise NotImplementedError("""You must implement this for your task""" )

    def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
        """simple docstring"""
        return self.train_loader

    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : str ) -> Any:
        """simple docstring"""
        return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]:
        """simple docstring"""
        return os.path.join(
            self.hparams.data_dir ,
            """cached_{}_{}_{}""".format(
                __SCREAMING_SNAKE_CASE ,
                list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() ,
                str(self.hparams.max_seq_length ) ,
            ) ,
        )

    @pl.utilities.rank_zero_only
    def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" )
        __SCREAMING_SNAKE_CASE = self.step_count
        self.model.save_pretrained(__SCREAMING_SNAKE_CASE )
        self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )

    @staticmethod
    def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int:
        """simple docstring"""
        parser.add_argument(
            """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE ,
            help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
        parser.add_argument(
            """--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" )
        parser.add_argument(
            """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ,
            help="""Pretrained tokenizer name or path if not the same as model_name""" , )
        parser.add_argument(
            """--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE ,
            help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , )
        parser.add_argument(
            """--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE ,
            help="""Encoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE ,
            help="""Decoder layer dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--dropout""" , type=__SCREAMING_SNAKE_CASE ,
            help="""Dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument(
            """--attention_dropout""" , type=__SCREAMING_SNAKE_CASE ,
            help="""Attention dropout probability (Optional). Goes into model.config""" , )
        parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" )
        parser.add_argument(
            """--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , )
        parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" )
        parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" )
        parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" )
        parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" )
        parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE )
        parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
        parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE )
        parser.add_argument("""--adafactor""" , action="""store_true""" )


class lowerCAmelCase__ ( pl.Callback ):
    """simple docstring"""

    def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class lowerCAmelCase__ ( pl.Callback ):
    """simple docstring"""

    def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any:
        """simple docstring"""
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(__SCREAMING_SNAKE_CASE )


class lowerCAmelCase__ ( pl.Callback ):
    """simple docstring"""

    def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""]
        __SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]:
        """simple docstring"""
        rank_zero_info("""***** Validation results *****""" )
        __SCREAMING_SNAKE_CASE = trainer.callback_metrics
        # Log results
        for key in sorted(__SCREAMING_SNAKE_CASE ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )

    def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str:
        """simple docstring"""
        rank_zero_info("""***** Test results *****""" )
        __SCREAMING_SNAKE_CASE = trainer.callback_metrics
        # Log and save results to file
        __SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" )
        with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer:
            for key in sorted(__SCREAMING_SNAKE_CASE ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )
                    writer.write("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) )


def a__ ( a__ , a__ ):
    """simple docstring"""
    parser.add_argument(
        """--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ ,
        help="""The output directory where the model predictions and checkpoints will be written.""" , )
    parser.add_argument(
        """--fp16""" , action="""store_true""" ,
        help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
    parser.add_argument(
        """--fp16_opt_level""" , type=a__ , default="""O2""" ,
        help=(
            """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
            """See details at https://nvidia.github.io/apex/amp.html"""
        ) , )
    parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ )
    parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" )
    parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" )
    parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" )
    parser.add_argument(
        """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 ,
        help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
    parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" )
    parser.add_argument(
        """--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ ,
        help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , )


def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    __SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=a__ )
    # add custom checkpoints
    if checkpoint_callback is None:
        __SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(a__ )
    if logging_callback is None:
        __SCREAMING_SNAKE_CASE = LoggingCallback()
    __SCREAMING_SNAKE_CASE = {}
    if args.fpaa:
        __SCREAMING_SNAKE_CASE = 16
    if args.gpus > 1:
        __SCREAMING_SNAKE_CASE = """auto"""
        __SCREAMING_SNAKE_CASE = """ddp"""
    __SCREAMING_SNAKE_CASE = args.accumulate_grad_batches
    __SCREAMING_SNAKE_CASE = None
    __SCREAMING_SNAKE_CASE = """auto"""
    __SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args(
        a__ ,
        weights_summary=a__ ,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] ,
        logger=a__ ,
        val_check_interval=1 ,
        num_sanity_val_steps=2 ,
        **a__ ,
    )
    if args.do_train:
        trainer.fit(a__ )
    else:
        print("""RAG modeling tests with new set functions successfuly executed!""" )
    return trainer
331
1
'''simple docstring'''

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCAmelCase : Any = logging.get_logger(__name__)

UpperCAmelCase : int = {
    'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class lowerCAmelCase__ ( a ):
    """simple docstring"""

    lowerCAmelCase__ = "wavlm"

    def __init__(
        self : Tuple ,
        __SCREAMING_SNAKE_CASE : List[Any]=32 ,
        __SCREAMING_SNAKE_CASE : List[str]=768 ,
        __SCREAMING_SNAKE_CASE : List[Any]=12 ,
        __SCREAMING_SNAKE_CASE : str=12 ,
        __SCREAMING_SNAKE_CASE : List[Any]=3_072 ,
        __SCREAMING_SNAKE_CASE : str="gelu" ,
        __SCREAMING_SNAKE_CASE : str=0.1 ,
        __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 ,
        __SCREAMING_SNAKE_CASE : Any=0.1 ,
        __SCREAMING_SNAKE_CASE : Dict=0.0 ,
        __SCREAMING_SNAKE_CASE : Optional[int]=0.1 ,
        __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 ,
        __SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 ,
        __SCREAMING_SNAKE_CASE : Optional[int]=1E-5 ,
        __SCREAMING_SNAKE_CASE : Tuple="group" ,
        __SCREAMING_SNAKE_CASE : int="gelu" ,
        __SCREAMING_SNAKE_CASE : Any=(512, 512, 512, 512, 512, 512, 512) ,
        __SCREAMING_SNAKE_CASE : Any=(5, 2, 2, 2, 2, 2, 2) ,
        __SCREAMING_SNAKE_CASE : str=(10, 3, 3, 3, 3, 2, 2) ,
        __SCREAMING_SNAKE_CASE : str=False ,
        __SCREAMING_SNAKE_CASE : Any=128 ,
        __SCREAMING_SNAKE_CASE : int=16 ,
        __SCREAMING_SNAKE_CASE : Union[str, Any]=320 ,
        __SCREAMING_SNAKE_CASE : Optional[int]=800 ,
        __SCREAMING_SNAKE_CASE : Dict=False ,
        __SCREAMING_SNAKE_CASE : List[str]=True ,
        __SCREAMING_SNAKE_CASE : Dict=0.05 ,
        __SCREAMING_SNAKE_CASE : Tuple=10 ,
        __SCREAMING_SNAKE_CASE : List[Any]=2 ,
        __SCREAMING_SNAKE_CASE : Dict=0.0 ,
        __SCREAMING_SNAKE_CASE : Optional[Any]=10 ,
        __SCREAMING_SNAKE_CASE : Optional[Any]=320 ,
        __SCREAMING_SNAKE_CASE : int=2 ,
        __SCREAMING_SNAKE_CASE : int=0.1 ,
        __SCREAMING_SNAKE_CASE : Optional[int]=100 ,
        __SCREAMING_SNAKE_CASE : Any=256 ,
        __SCREAMING_SNAKE_CASE : str=256 ,
        __SCREAMING_SNAKE_CASE : Dict=0.1 ,
        __SCREAMING_SNAKE_CASE : List[str]="mean" ,
        __SCREAMING_SNAKE_CASE : Optional[int]=False ,
        __SCREAMING_SNAKE_CASE : Dict=False ,
        __SCREAMING_SNAKE_CASE : Tuple=256 ,
        __SCREAMING_SNAKE_CASE : Optional[Any]=(512, 512, 512, 512, 1_500) ,
        __SCREAMING_SNAKE_CASE : Union[str, Any]=(5, 3, 3, 1, 1) ,
        __SCREAMING_SNAKE_CASE : Tuple=(1, 2, 3, 1, 1) ,
        __SCREAMING_SNAKE_CASE : Any=512 ,
        __SCREAMING_SNAKE_CASE : List[Any]=80 ,
        __SCREAMING_SNAKE_CASE : List[Any]=0 ,
        __SCREAMING_SNAKE_CASE : List[Any]=1 ,
        __SCREAMING_SNAKE_CASE : List[str]=2 ,
        __SCREAMING_SNAKE_CASE : Any=False ,
        __SCREAMING_SNAKE_CASE : Optional[int]=3 ,
        __SCREAMING_SNAKE_CASE : Optional[Any]=2 ,
        __SCREAMING_SNAKE_CASE : Optional[int]=3 ,
        __SCREAMING_SNAKE_CASE : Optional[int]=None ,
        **__SCREAMING_SNAKE_CASE : Union[str, Any] ,
    ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = hidden_size
        __SCREAMING_SNAKE_CASE = feat_extract_norm
        __SCREAMING_SNAKE_CASE = feat_extract_activation
        __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = conv_bias
        __SCREAMING_SNAKE_CASE = num_buckets
        __SCREAMING_SNAKE_CASE = max_bucket_distance
        __SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
        __SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
        __SCREAMING_SNAKE_CASE = len(self.conv_dim )
        __SCREAMING_SNAKE_CASE = num_hidden_layers
        __SCREAMING_SNAKE_CASE = intermediate_size
        __SCREAMING_SNAKE_CASE = hidden_act
        __SCREAMING_SNAKE_CASE = num_attention_heads
        __SCREAMING_SNAKE_CASE = hidden_dropout
        __SCREAMING_SNAKE_CASE = attention_dropout
        __SCREAMING_SNAKE_CASE = activation_dropout
        __SCREAMING_SNAKE_CASE = feat_proj_dropout
        __SCREAMING_SNAKE_CASE = final_dropout
        __SCREAMING_SNAKE_CASE = layerdrop
        __SCREAMING_SNAKE_CASE = layer_norm_eps
        __SCREAMING_SNAKE_CASE = initializer_range
        __SCREAMING_SNAKE_CASE = num_ctc_classes
        __SCREAMING_SNAKE_CASE = vocab_size
        __SCREAMING_SNAKE_CASE = do_stable_layer_norm
        __SCREAMING_SNAKE_CASE = use_weighted_layer_sum
        __SCREAMING_SNAKE_CASE = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __SCREAMING_SNAKE_CASE = apply_spec_augment
        __SCREAMING_SNAKE_CASE = mask_time_prob
        __SCREAMING_SNAKE_CASE = mask_time_length
        __SCREAMING_SNAKE_CASE = mask_time_min_masks
        __SCREAMING_SNAKE_CASE = mask_feature_prob
        __SCREAMING_SNAKE_CASE = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        __SCREAMING_SNAKE_CASE = num_codevectors_per_group
        __SCREAMING_SNAKE_CASE = num_codevector_groups
        __SCREAMING_SNAKE_CASE = contrastive_logits_temperature
        __SCREAMING_SNAKE_CASE = num_negatives
        __SCREAMING_SNAKE_CASE = codevector_dim
        __SCREAMING_SNAKE_CASE = proj_codevector_dim
        __SCREAMING_SNAKE_CASE = diversity_loss_weight
        # ctc loss
        __SCREAMING_SNAKE_CASE = ctc_loss_reduction
        __SCREAMING_SNAKE_CASE = ctc_zero_infinity
        # adapter
        __SCREAMING_SNAKE_CASE = add_adapter
        __SCREAMING_SNAKE_CASE = adapter_kernel_size
        __SCREAMING_SNAKE_CASE = adapter_stride
        __SCREAMING_SNAKE_CASE = num_adapter_layers
        __SCREAMING_SNAKE_CASE = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __SCREAMING_SNAKE_CASE = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = list(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = xvector_output_dim

    @property
    def UpperCAmelCase__ ( self : int ) -> List[Any]:
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
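The closing property above multiplies the conv strides together, i.e. how many raw audio samples collapse into one frame of encoder output. A quick check of that arithmetic, assuming the stock `transformers` WavLM config (where this property is exposed as `inputs_to_logits_ratio`):

# With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) the feature extractor
# downsamples by 5 * 2**6 = 320, i.e. 320 input samples per output frame
# (a 50 Hz frame rate for 16 kHz audio).
from transformers import WavLMConfig

cfg = WavLMConfig()
print(cfg.inputs_to_logits_ratio)  # 320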
331
'''simple docstring'''

import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class lowerCAmelCase__ ( a ):
    """simple docstring"""

    lowerCAmelCase__ = (DDPMScheduler,)

    def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """variance_type""": """fixed_small""",
            """clip_sample""": True,
        }
        config.update(**__SCREAMING_SNAKE_CASE )
        return config

    def UpperCAmelCase__ ( self : str ) -> str:
        """simple docstring"""
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : List[Any] ) -> str:
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Any ) -> int:
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : str ) -> Optional[int]:
        """simple docstring"""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , )

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5

    def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = self.dummy_model()
        __SCREAMING_SNAKE_CASE = self.dummy_sample_deter
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # 2. predict previous mean of sample x_t-1
            __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            __SCREAMING_SNAKE_CASE = pred_prev_sample
        __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_sum.item() - 258.9606 ) < 1E-2
        assert abs(result_mean.item() - 0.3372 ) < 1E-3

    def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" )
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = self.dummy_model()
        __SCREAMING_SNAKE_CASE = self.dummy_sample_deter
        __SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
        for t in reversed(range(__SCREAMING_SNAKE_CASE ) ):
            # 1. predict noise residual
            __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            # 2. predict previous mean of sample x_t-1
            __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            __SCREAMING_SNAKE_CASE = pred_prev_sample
        __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) )
        __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) )
        assert abs(result_sum.item() - 202.0296 ) < 1E-2
        assert abs(result_mean.item() - 0.2631 ) < 1E-3

    def UpperCAmelCase__ ( self : Optional[int] ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = scheduler.timesteps
        for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ):
            if i == len(__SCREAMING_SNAKE_CASE ) - 1:
                __SCREAMING_SNAKE_CASE = -1
            else:
                __SCREAMING_SNAKE_CASE = timesteps[i + 1]
            __SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = prev_t.item()
            self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Any ) -> List[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
        __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
        with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
        __SCREAMING_SNAKE_CASE = self.get_scheduler_config()
        __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            __SCREAMING_SNAKE_CASE ,
            msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" ,
        ):
            scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
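For context, the `step()` API those tests exercise is the reverse-diffusion update. A minimal loop under stated assumptions: a zero tensor stands in for a trained UNet's noise prediction, so the output is meaningless, but the call sequence is the real `diffusers` one.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(50)           # run a shortened inference schedule
sample = torch.randn(1, 3, 8, 8)      # start from pure noise
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    # step() computes the previous (less noisy) sample x_{t-1}
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])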
331
1
'''simple docstring'''

from __future__ import annotations

import os
import tempfile
import unittest

from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFConvBertForMaskedLM,
        TFConvBertForMultipleChoice,
        TFConvBertForQuestionAnswering,
        TFConvBertForSequenceClassification,
        TFConvBertForTokenClassification,
        TFConvBertModel,
    )


class lowerCAmelCase__ :
    """simple docstring"""

    def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : List[Any]=4 , __SCREAMING_SNAKE_CASE : str=37 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : List[Any]=None , ) -> Union[str, Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = parent
        __SCREAMING_SNAKE_CASE = 13
        __SCREAMING_SNAKE_CASE = 7
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = 99
        __SCREAMING_SNAKE_CASE = 384
        __SCREAMING_SNAKE_CASE = 2
        __SCREAMING_SNAKE_CASE = 4
        __SCREAMING_SNAKE_CASE = 37
        __SCREAMING_SNAKE_CASE = """gelu"""
        __SCREAMING_SNAKE_CASE = 0.1
        __SCREAMING_SNAKE_CASE = 0.1
        __SCREAMING_SNAKE_CASE = 512
        __SCREAMING_SNAKE_CASE = 16
        __SCREAMING_SNAKE_CASE = 2
        __SCREAMING_SNAKE_CASE = 0.02
        __SCREAMING_SNAKE_CASE = 3
        __SCREAMING_SNAKE_CASE = 4
        __SCREAMING_SNAKE_CASE = 128
        __SCREAMING_SNAKE_CASE = 2
        __SCREAMING_SNAKE_CASE = 9
        __SCREAMING_SNAKE_CASE = 1
        __SCREAMING_SNAKE_CASE = None

    def UpperCAmelCase__ ( self : int ) -> str:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __SCREAMING_SNAKE_CASE = None
        if self.use_input_mask:
            __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
        __SCREAMING_SNAKE_CASE = None
        if self.use_token_type_ids:
            __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = None
        __SCREAMING_SNAKE_CASE = None
        if self.use_labels:
            __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
        __SCREAMING_SNAKE_CASE = ConvBertConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            initializer_range=self.initializer_range ,
            return_dict=__SCREAMING_SNAKE_CASE ,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = TFConvBertModel(config=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        __SCREAMING_SNAKE_CASE = [input_ids, input_mask]
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ) -> str:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = TFConvBertForMaskedLM(config=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> Any:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.num_labels
        __SCREAMING_SNAKE_CASE = TFConvBertForSequenceClassification(config=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.num_choices
        __SCREAMING_SNAKE_CASE = TFConvBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
        __SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
        __SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
        __SCREAMING_SNAKE_CASE = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.num_labels
        __SCREAMING_SNAKE_CASE = TFConvBertForTokenClassification(config=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = TFConvBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase__ ( self : int ) -> str:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            (__SCREAMING_SNAKE_CASE) ,
            (__SCREAMING_SNAKE_CASE) ,
            (__SCREAMING_SNAKE_CASE) ,
            (__SCREAMING_SNAKE_CASE) ,
            (__SCREAMING_SNAKE_CASE) ,
            (__SCREAMING_SNAKE_CASE) ,
            (__SCREAMING_SNAKE_CASE) ,
        ) = config_and_inputs
        __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_tf
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
    """simple docstring"""

    lowerCAmelCase__ = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase__ = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = TFConvBertModelTester(self )
        __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )

    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : List[str] ) -> int:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : str ) -> List[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : int ) -> List[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )

    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = True
        if hasattr(__SCREAMING_SNAKE_CASE , """use_cache""" ):
            __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        __SCREAMING_SNAKE_CASE = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
        for model_class in self.all_model_classes:
            __SCREAMING_SNAKE_CASE = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = len(model(__SCREAMING_SNAKE_CASE ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__SCREAMING_SNAKE_CASE , saved_model=__SCREAMING_SNAKE_CASE )
                __SCREAMING_SNAKE_CASE = os.path.join(__SCREAMING_SNAKE_CASE , """saved_model""" , """1""" )
                __SCREAMING_SNAKE_CASE = tf.keras.models.load_model(__SCREAMING_SNAKE_CASE )
                __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
                if self.is_encoder_decoder:
                    __SCREAMING_SNAKE_CASE = outputs["""encoder_hidden_states"""]
                    __SCREAMING_SNAKE_CASE = outputs["""encoder_attentions"""]
                else:
                    __SCREAMING_SNAKE_CASE = outputs["""hidden_states"""]
                    __SCREAMING_SNAKE_CASE = outputs["""attentions"""]
                self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
                __SCREAMING_SNAKE_CASE = getattr(
                    self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) ,
                    [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) ,
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def UpperCAmelCase__ ( self : str ) -> str:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        __SCREAMING_SNAKE_CASE = True
        __SCREAMING_SNAKE_CASE = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
        __SCREAMING_SNAKE_CASE = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
        __SCREAMING_SNAKE_CASE = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = getattr(self.model_tester , """key_length""" , __SCREAMING_SNAKE_CASE )

        def check_decoder_attentions_output(__SCREAMING_SNAKE_CASE : List[str] ):
            __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
            self.assertEqual(out_len % 2 , 0 )
            __SCREAMING_SNAKE_CASE = outputs.decoder_attentions
            self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
            __SCREAMING_SNAKE_CASE = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            __SCREAMING_SNAKE_CASE = True
            __SCREAMING_SNAKE_CASE = False
            __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
            __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE )
            self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
            check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
            if self.is_encoder_decoder:
                __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
                __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
                self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
                check_decoder_attentions_output(__SCREAMING_SNAKE_CASE )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            __SCREAMING_SNAKE_CASE = True
            __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
            self.assertEqual(config.output_hidden_states , __SCREAMING_SNAKE_CASE )
            check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )
            # Check attention is always last and order is fine
            __SCREAMING_SNAKE_CASE = True
            __SCREAMING_SNAKE_CASE = True
            __SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE = model(self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__SCREAMING_SNAKE_CASE ) )
            self.assertEqual(model.config.output_hidden_states , __SCREAMING_SNAKE_CASE )
            check_encoder_attentions_output(__SCREAMING_SNAKE_CASE )


@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
        __SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0]
        __SCREAMING_SNAKE_CASE = [1, 6, 768]
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
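The integration test at the end of that row reduces to a short inference snippet; this sketch mirrors it (same public checkpoint, shape check only, weights downloaded on first run):

import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])  # six arbitrary token ids
output = model(input_ids)[0]
print(output.shape)  # (1, 6, 768): batch x sequence x hidden size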
331
'''simple docstring'''

from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

UpperCAmelCase : Dict = TypeVar('T')


def a__ ( a__ ):
    """simple docstring"""
    return (position - 1) // 2


def a__ ( a__ ):
    """simple docstring"""
    return (2 * position) + 1


def a__ ( a__ ):
    """simple docstring"""
    return (2 * position) + 2


class lowerCAmelCase__ ( Generic[T] ):
    """simple docstring"""

    def __init__( self : List[str] ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = []
        __SCREAMING_SNAKE_CASE = {}
        __SCREAMING_SNAKE_CASE = 0

    def __len__( self : Optional[Any] ) -> int:
        """simple docstring"""
        return self.elements

    def __repr__( self : List[str] ) -> str:
        """simple docstring"""
        return str(self.heap )

    def UpperCAmelCase__ ( self : Tuple ) -> bool:
        """simple docstring"""
        return self.elements == 0

    def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
        """simple docstring"""
        self.heap.append((elem, weight) )
        __SCREAMING_SNAKE_CASE = self.elements
        self.elements += 1
        self._bubble_up(__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : Any ) -> T:
        """simple docstring"""
        if self.elements > 1:
            self._swap_nodes(0 , self.elements - 1 )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[0]
            self._bubble_down(__SCREAMING_SNAKE_CASE )
        return elem

    def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.position_map[elem]
        __SCREAMING_SNAKE_CASE = (elem, weight)
        if position > 0:
            __SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(__SCREAMING_SNAKE_CASE )
            else:
                self._bubble_down(__SCREAMING_SNAKE_CASE )
        else:
            self._bubble_down(__SCREAMING_SNAKE_CASE )

    def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.position_map[elem]
        if curr_pos == 0:
            return None
        __SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            return self._bubble_up(__SCREAMING_SNAKE_CASE )
        return None

    def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.position_map[elem]
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
        __SCREAMING_SNAKE_CASE = get_child_left_position(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = get_child_right_position(__SCREAMING_SNAKE_CASE )
        if child_left_position < self.elements and child_right_position < self.elements:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                return self._bubble_down(__SCREAMING_SNAKE_CASE )
        if child_left_position < self.elements:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                return self._bubble_down(__SCREAMING_SNAKE_CASE )
        else:
            return None
        if child_right_position < self.elements:
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                return self._bubble_down(__SCREAMING_SNAKE_CASE )
        return None

    def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
        __SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
            self.heap[nodea_pos],
            self.heap[nodea_pos],
        )
        __SCREAMING_SNAKE_CASE = nodea_pos
        __SCREAMING_SNAKE_CASE = nodea_pos


class lowerCAmelCase__ ( Generic[T] ):
    """simple docstring"""

    def __init__( self : Union[str, Any] ) -> None:
        """simple docstring"""
        __SCREAMING_SNAKE_CASE = {}
        __SCREAMING_SNAKE_CASE = 0

    def __repr__( self : Dict ) -> str:
        """simple docstring"""
        return str(self.connections )

    def __len__( self : Dict ) -> int:
        """simple docstring"""
        return self.nodes

    def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None:
        """simple docstring"""
        if node not in self.connections:
            __SCREAMING_SNAKE_CASE = {}
            self.nodes += 1

    def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None:
        """simple docstring"""
        self.add_node(__SCREAMING_SNAKE_CASE )
        self.add_node(__SCREAMING_SNAKE_CASE )
        __SCREAMING_SNAKE_CASE = weight
        __SCREAMING_SNAKE_CASE = weight


def a__ ( a__ , ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections}
    __SCREAMING_SNAKE_CASE = {node: None for node in graph.connections}
    __SCREAMING_SNAKE_CASE = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(a__ , a__ )
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    __SCREAMING_SNAKE_CASE = priority_queue.extract_min()
    __SCREAMING_SNAKE_CASE = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            __SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(a__ , dist[neighbour] )
            __SCREAMING_SNAKE_CASE = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        __SCREAMING_SNAKE_CASE = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                __SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(a__ , dist[neighbour] )
                __SCREAMING_SNAKE_CASE = node
    return dist, parent
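The obfuscation in that row reuses one class name for both the heap and the graph, so the block is not importable as-is. For reference, here is an equivalent Prim's-algorithm run using only the standard library's `heapq`, producing the same kind of parent map the last function returns:

import heapq

def prim(adj, start):
    """Return {node: parent} for the minimum spanning tree rooted at start."""
    parent = {}
    visited = set()
    frontier = [(0, start, None)]  # (edge weight, node, parent)
    while frontier:
        weight, node, par = heapq.heappop(frontier)
        if node in visited:
            continue  # stale entry; a cheaper edge already reached this node
        visited.add(node)
        parent[node] = par
        for neighbour, w in adj[node].items():
            if neighbour not in visited:
                heapq.heappush(frontier, (w, neighbour, node))
    return parent

adj = {"a": {"b": 3, "c": 1}, "b": {"a": 3, "c": 7}, "c": {"a": 1, "b": 7}}
print(prim(adj, "a"))  # {'a': None, 'c': 'a', 'b': 'a'}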
331
1
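# A compact, self-contained sketch of what the file above implements: Prim's
# minimum spanning tree over an adjacency map. This version swaps the
# hand-rolled indexed min-priority queue (with update_key / decrease-key) for
# heapq's lazy-deletion variant; all names here are illustrative, not the
# record's own.
import heapq


def prim_mst(graph: dict, start) -> dict:
    # graph: {node: {neighbour: weight}}; returns {child: parent} for the MST.
    parent: dict = {}
    seen = {start}
    heap = [(weight, start, child) for child, weight in graph[start].items()]
    heapq.heapify(heap)
    while heap:
        weight, u, v = heapq.heappop(heap)
        if v in seen:  # stale entry: v was already connected more cheaply
            continue
        seen.add(v)
        parent[v] = u
        for nxt, w in graph[v].items():
            if nxt not in seen:
                heapq.heappush(heap, (w, v, nxt))
    return parent


print(prim_mst({"a": {"b": 3, "c": 1}, "b": {"a": 3, "c": 7}, "c": {"a": 1, "b": 7}}, "a"))
# {'c': 'a', 'b': 'a'}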
'''simple docstring''' import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=13 , __SCREAMING_SNAKE_CASE : Tuple=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Optional[Any]=99 , __SCREAMING_SNAKE_CASE : List[Any]=16 , __SCREAMING_SNAKE_CASE : List[Any]=36 , __SCREAMING_SNAKE_CASE : str=6 , __SCREAMING_SNAKE_CASE : Union[str, Any]=6 , __SCREAMING_SNAKE_CASE : Optional[Any]=6 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = embedding_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_hidden_groups __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) 
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = AlbertModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = AlbertForPreTraining(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , sentence_order_label=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = AlbertForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , 
__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = AlbertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = AlbertForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = AlbertForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = AlbertForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( 
__SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any=False ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) return inputs_dict def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = AlbertModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type 
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = AlbertModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = AlbertModel.from_pretrained("""albert-base-v2""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
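# A hedged, minimal version of the shape check the tester above automates:
# build a tiny AlbertConfig (sizes mirror the defaults in the tester's
# __init__, with fewer layers) and confirm the forward-pass output shapes.
# This is a sketch, not the test suite's own fixture.
import torch
from transformers import AlbertConfig, AlbertModel

tiny_config = AlbertConfig(
    vocab_size=99,
    embedding_size=16,
    hidden_size=36,
    num_hidden_layers=2,
    num_hidden_groups=1,
    num_attention_heads=6,
    intermediate_size=37,
)
model = AlbertModel(tiny_config)
model.eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (2, 7))
with torch.no_grad():
    outputs = model(input_ids)
assert outputs.last_hidden_state.shape == (2, 7, tiny_config.hidden_size)
assert outputs.pooler_output.shape == (2, tiny_config.hidden_size)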
'''simple docstring'''
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    """simple docstring"""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
331
1
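# A quick sanity check of quadratic_roots above on a factorable case,
# x^2 - 5x + 6 = (x - 2)(x - 3): cmath.sqrt yields complex values, but the
# zero-imaginary branch hands back plain floats.
print(quadratic_roots(a=1, b=-5, c=6))  # (3.0, 2.0)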
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
331
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
331
1
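# A hedged round-trip sketch for the config above. Assumes RetriBertConfig is
# still exported at the transformers top level (the model is deprecated);
# defaults come straight from the __init__ signature, and to_json_string is
# inherited from PretrainedConfig.
from transformers import RetriBertConfig

config = RetriBertConfig(projection_dim=64)
assert config.model_type == "retribert"
assert config.vocab_size == 30_522
assert config.share_encoders is True
assert config.projection_dim == 64
print(config.to_json_string()[:60])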
'''simple docstring''' from __future__ import annotations from collections.abc import Callable UpperCAmelCase : List[str] = list[list[float | int]] def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = len(a__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(a__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for row in range(a__ ): for col in range(a__ ): __SCREAMING_SNAKE_CASE = matrix[row][col] __SCREAMING_SNAKE_CASE = vector[row][0] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 while row < size and col < size: # pivoting __SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(a__ , a__ ) )[ 1 ] if augmented[pivot_row][col] == 0: col += 1 continue else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row] for rowa in range(row + 1 , a__ ): __SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col] __SCREAMING_SNAKE_CASE = 0 for cola in range(col + 1 , size + 1 ): augmented[rowa][cola] -= augmented[row][cola] * ratio row += 1 col += 1 # back substitution for col in range(1 , a__ ): for row in range(a__ ): __SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col] for cola in range(a__ , size + 1 ): augmented[row][cola] -= augmented[col][cola] * ratio # round to get rid of numbers like 2.000000000000004 return [ [round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(a__ ) ] def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = len(a__ ) __SCREAMING_SNAKE_CASE = [[0 for _ in range(a__ )] for _ in range(a__ )] __SCREAMING_SNAKE_CASE = [[0] for _ in range(a__ )] __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for x_val, y_val in enumerate(a__ ): for col in range(a__ ): __SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1) __SCREAMING_SNAKE_CASE = y_val __SCREAMING_SNAKE_CASE = solve(a__ , a__ ) def interpolated_func(a__ ) -> int: return sum( round(coeffs[x_val][0] ) * (var ** (size - x_val - 1)) for x_val in range(a__ ) ) return interpolated_func def a__ ( a__ ): """simple docstring""" return ( 1 - variable + variable**2 - variable**3 + variable**4 - variable**5 + variable**6 - variable**7 + variable**8 - variable**9 + variable**10 ) def a__ ( a__ = question_function , a__ = 10 ): """simple docstring""" __SCREAMING_SNAKE_CASE = [func(a__ ) for x_val in range(1 , order + 1 )] __SCREAMING_SNAKE_CASE = [ interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 ) ] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 42 __SCREAMING_SNAKE_CASE = 42 for poly in polynomials: __SCREAMING_SNAKE_CASE = 1 while func(a__ ) == poly(a__ ): x_val += 1 ret += poly(a__ ) return ret if __name__ == "__main__": print(f"""{solution() = }""")
331
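# The idea behind the file above (Project Euler 101), sketched in Lagrange
# form instead of its Gaussian-elimination solve: fit a polynomial through the
# first k terms of a sequence, then read off the next value -- the "first
# incorrect term" when the true generator has higher degree. Names here are
# illustrative.
from fractions import Fraction


def lagrange_eval(points, x):
    # Evaluate the unique interpolating polynomial through `points` at `x`.
    total = Fraction(0)
    for i, (xi, yi) in enumerate(points):
        term = Fraction(yi)
        for j, (xj, _) in enumerate(points):
            if i != j:
                term *= Fraction(x - xj, xi - xj)
        total += term
    return total


u = lambda n: n ** 3                      # true generator (degree 3)
seq = [(n, u(n)) for n in range(1, 4)]    # first three terms only
print(lagrange_eval(seq, 4))              # 58, not u(4) == 64: a first incorrect term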
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = AltDiffusionPipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) __SCREAMING_SNAKE_CASE = 77 __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, 
"""num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A photo of an astronaut""" __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE 
) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
331
1
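# The device-dependent generator idiom from get_dummy_inputs above, isolated:
# mps backends take the globally seeded default generator, everything else a
# device-local torch.Generator. A minimal sketch with an illustrative name:
import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


gen = make_generator("cpu")
print(torch.randn(2, generator=gen))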
'''simple docstring'''
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
331
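# The arithmetic behind the file above, worked explicitly: by linearity of
# expectation over the 7 colours, each colour is absent from a 20-ball draw
# with probability C(60, 20) / C(70, 20), so
#     E[distinct colours] = 7 * (1 - C(60, 20) / C(70, 20))
import math
import random

exact = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
print(f"{exact:.9f}")  # 6.818741802

# Monte Carlo cross-check (approximate, seed-dependent):
urn = [colour for colour in range(7) for _ in range(10)]
trials = 100_000
estimate = sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials
print(round(estimate, 2))  # ~6.82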
'''simple docstring''' import argparse import os import re import packaging.version UpperCAmelCase : Optional[int] = 'examples/' UpperCAmelCase : List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCAmelCase : Union[str, Any] = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } UpperCAmelCase : Tuple = 'README.md' def a__ ( a__ , a__ , a__ ): """simple docstring""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ ) __SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ ) with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(a__ ) def a__ ( a__ ): """simple docstring""" for folder, directories, fnames in os.walk(a__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" ) def a__ ( a__ , a__=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(a__ , a__ , a__ ) if not patch: update_version_in_examples(a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures""" __SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __SCREAMING_SNAKE_CASE = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(a__ ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0] return packaging.version.parse(a__ ) def a__ ( a__=False ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? 
[{default_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = default_version print(F'Updating version to {version}.' ) global_version_update(a__ , patch=a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() __SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0' __SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = dev_version print(F'Updating version to {version}.' ) global_version_update(a__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCAmelCase : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
331
1
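# One (pattern, template) pair from REPLACE_PATTERNS above, exercised on a
# sample string: the helper just injects the new version into the template and
# lets re.sub rewrite the matching line. The version string here is a
# hypothetical example.
import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"'

text = 'import os\n__version__ = "0.19.0.dev0"\n'
print(pattern.sub(template.replace("VERSION", "0.19.0"), text))
# import os
# __version__ = "0.19.0"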
'''simple docstring''' import argparse import os import re import packaging.version UpperCAmelCase : Optional[int] = 'examples/' UpperCAmelCase : List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCAmelCase : Union[str, Any] = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } UpperCAmelCase : Tuple = 'README.md' def a__ ( a__ , a__ , a__ ): """simple docstring""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ ) __SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ ) with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(a__ ) def a__ ( a__ ): """simple docstring""" for folder, directories, fnames in os.walk(a__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" ) def a__ ( a__ , a__=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(a__ , a__ , a__ ) if not patch: update_version_in_examples(a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures""" __SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __SCREAMING_SNAKE_CASE = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(a__ ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0] return packaging.version.parse(a__ ) def a__ ( a__=False ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? 
[{default_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = default_version print(F'Updating version to {version}.' ) global_version_update(a__ , patch=a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() __SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0' __SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = dev_version print(F'Updating version to {version}.' ) global_version_update(a__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCAmelCase : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
331
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return MraConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , 
__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 
).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE ) 
self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""MRA does not output attentions""" ) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
1
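(A brief aside, not part of the dataset row above: the multiple-choice test expands each input once per answer candidate via `unsqueeze(1).expand(-1, num_choices, -1).contiguous()`. A minimal standalone sketch of that shape trick — all names below are illustrative:)

import torch

# a batch of 2 sequences, 3 tokens each
input_ids = torch.arange(6).reshape(2, 3)
num_choices = 4

# replicate every sequence once per answer choice: (2, 3) -> (2, 4, 3)
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()

print(expanded.shape)  # torch.Size([2, 4, 3])
# each slice along dim=1 is an identical copy of the original sequence
assert torch.equal(expanded[0, 0], expanded[0, num_choices - 1])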
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase : List[Any] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase : Optional[int] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase : Tuple = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... 
[["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[ """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Optional[int]=False ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = compute_bleu( reference_corpus=__SCREAMING_SNAKE_CASE , translation_corpus=__SCREAMING_SNAKE_CASE , max_order=__SCREAMING_SNAKE_CASE , smooth=__SCREAMING_SNAKE_CASE ) ((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
331
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase__ = 10000 lowerCAmelCase__ = None lowerCAmelCase__ = None class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase__ = ParquetConfig def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) __SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ): __SCREAMING_SNAKE_CASE = data_files if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] __SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) ) return splits def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE = 
pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' ) raise
331
1
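(Aside: to make the BLEU description in the row above concrete, here is a hand-rolled sketch of the clipped unigram precision the score builds on. The metric itself delegates to `compute_bleu` from the TensorFlow NMT scripts; this fragment is illustrative only.)

from collections import Counter

prediction = ["hello", "there", "general", "kenobi"]
references = [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]]

# clip each token count by the maximum count seen in any single reference
max_ref_counts = Counter()
for ref in references:
    for token, count in Counter(ref).items():
        max_ref_counts[token] = max(max_ref_counts[token], count)

pred_counts = Counter(prediction)
clipped = sum(min(count, max_ref_counts[token]) for token, count in pred_counts.items())
precision = clipped / sum(pred_counts.values())
print(precision)  # 1.0 -- the prediction matches the first reference exactly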
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('0.8.3'): raise Exception('requires gluonnlp == 0.8.3') if version.parse(mx.__version__) != version.parse('1.5.0'): raise Exception('requires mxnet == 1.5.0') logging.set_verbosity_info() UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase : str = 'The Nymphenburg Palace is a beautiful palace in Munich!' def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = { """attention_cell""": """multi_head""", """num_layers""": 4, """units""": 10_24, """hidden_size""": 7_68, """max_length""": 5_12, """num_heads""": 8, """scaled""": True, """dropout""": 0.1, """use_residual""": True, """embed_size""": 10_24, """embed_dropout""": 0.1, """word_embed""": None, """layer_norm_eps""": 1E-5, """token_type_vocab_size""": 2, } __SCREAMING_SNAKE_CASE = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py __SCREAMING_SNAKE_CASE = BERTEncoder( attention_cell=predefined_args["""attention_cell"""] , num_layers=predefined_args["""num_layers"""] , units=predefined_args["""units"""] , hidden_size=predefined_args["""hidden_size"""] , max_length=predefined_args["""max_length"""] , num_heads=predefined_args["""num_heads"""] , scaled=predefined_args["""scaled"""] , dropout=predefined_args["""dropout"""] , output_attention=a__ , output_all_encodings=a__ , use_residual=predefined_args["""use_residual"""] , activation=predefined_args.get("""activation""" , """gelu""" ) , layer_norm_eps=predefined_args.get("""layer_norm_eps""" , a__ ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later __SCREAMING_SNAKE_CASE = """openwebtext_ccnews_stories_books_cased""" # Specify download folder to Gluonnlp's vocab __SCREAMING_SNAKE_CASE = os.path.join(get_home_dir() , """models""" ) __SCREAMING_SNAKE_CASE = _load_vocab(a__ , a__ , a__ , cls=a__ ) __SCREAMING_SNAKE_CASE = nlp.model.BERTModel( a__ , len(a__ ) , units=predefined_args["""units"""] , embed_size=predefined_args["""embed_size"""] , embed_dropout=predefined_args["""embed_dropout"""] , word_embed=predefined_args["""word_embed"""] , use_pooler=a__ , use_token_type_embed=a__ , token_type_vocab_size=predefined_args["""token_type_vocab_size"""] , use_classifier=a__ , use_decoder=a__ , ) original_bort.load_parameters(a__ , cast_dtype=a__ , ignore_extra=a__ ) __SCREAMING_SNAKE_CASE = original_bort._collect_params_with_prefix() # Build our config 🤗 __SCREAMING_SNAKE_CASE = { """architectures""": ["""BertForMaskedLM"""], """attention_probs_dropout_prob""": predefined_args["""dropout"""], """hidden_act""": """gelu""", """hidden_dropout_prob""": predefined_args["""dropout"""], """hidden_size""": predefined_args["""embed_size"""], """initializer_range""": 0.02, """intermediate_size""": predefined_args["""hidden_size"""], """layer_norm_eps""": 
predefined_args["""layer_norm_eps"""], """max_position_embeddings""": predefined_args["""max_length"""], """model_type""": """bort""", """num_attention_heads""": predefined_args["""num_heads"""], """num_hidden_layers""": predefined_args["""num_layers"""], """pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa """type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa """vocab_size""": len(a__ ), } __SCREAMING_SNAKE_CASE = BertConfig.from_dict(a__ ) __SCREAMING_SNAKE_CASE = BertForMaskedLM(a__ ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(a__ ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(a__ , a__ ): __SCREAMING_SNAKE_CASE = hf_param.shape __SCREAMING_SNAKE_CASE = to_torch(params[gluon_param] ) __SCREAMING_SNAKE_CASE = gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param __SCREAMING_SNAKE_CASE = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , """word_embed.0.weight""" ) __SCREAMING_SNAKE_CASE = check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , """encoder.position_weight""" ) __SCREAMING_SNAKE_CASE = 
check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , """encoder.layer_norm.beta""" ) __SCREAMING_SNAKE_CASE = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , """encoder.layer_norm.gamma""" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) __SCREAMING_SNAKE_CASE = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): __SCREAMING_SNAKE_CASE = hf_bort_model.bert.encoder.layer[i] # self attention __SCREAMING_SNAKE_CASE = layer.attention.self __SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output __SCREAMING_SNAKE_CASE = layer.attention.output __SCREAMING_SNAKE_CASE = check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) __SCREAMING_SNAKE_CASE = check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate __SCREAMING_SNAKE_CASE = layer.intermediate __SCREAMING_SNAKE_CASE = check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) __SCREAMING_SNAKE_CASE = check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output __SCREAMING_SNAKE_CASE = layer.output __SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) __SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) __SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) __SCREAMING_SNAKE_CASE = check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models __SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained("""roberta-base""" ) __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(a__ )["""input_ids"""] # Get gluon output __SCREAMING_SNAKE_CASE = mx.nd.array([input_ids] ) __SCREAMING_SNAKE_CASE = original_bort(inputs=a__ , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(a__ ) __SCREAMING_SNAKE_CASE = BertModel.from_pretrained(a__ ) hf_bort_model.eval() __SCREAMING_SNAKE_CASE = tokenizer.encode_plus(a__ , return_tensors="""pt""" ) __SCREAMING_SNAKE_CASE = hf_bort_model(**a__ )[0] 
__SCREAMING_SNAKE_CASE = output_gluon[0].asnumpy() __SCREAMING_SNAKE_CASE = output_hf[0].detach().numpy() __SCREAMING_SNAKE_CASE = np.max(np.abs(hf_layer - gluon_layer ) ).item() __SCREAMING_SNAKE_CASE = np.allclose(a__ , a__ , atol=1E-3 ) if success: print("""✔️ Both models output the same tensors""" ) else: print("""❌ Both models do **NOT** output the same tensors""" ) print("""Absolute difference is:""" , a__ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bort_checkpoint_path', default=None, type=str, required=True, help='Path to the official Bort params file.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : Any = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
331
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase : Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] UpperCAmelCase : Optional[Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] UpperCAmelCase : Optional[int] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[str] = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def a__ ( a__ , a__ ): """simple docstring""" for tf_name, hf_name in patterns: __SCREAMING_SNAKE_CASE = k.replace(a__ , a__ ) return k def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ ) __SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ ) __SCREAMING_SNAKE_CASE = torch_model.state_dict() __SCREAMING_SNAKE_CASE = {} # separating decoder weights __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = DECODER_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = REMAINING_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""] __SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ ) __SCREAMING_SNAKE_CASE = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = ["""global_step"""] for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ): __SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name ) if skip_key: continue __SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ ) __SCREAMING_SNAKE_CASE = array return tf_weights def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ ) __SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase : int = parser.parse_args() UpperCAmelCase : Dict = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
331
1
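(Aside: both conversion scripts in this row rename checkpoint keys the same way — an ordered list of (source, target) substring pairs applied to every key. A standalone sketch of that step; the pattern pairs below are illustrative, not the full tables from the scripts.)

# order matters: later patterns can rewrite text produced by earlier ones
PATTERNS = [
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
]

def rename_key(key: str) -> str:
    for src, dst in PATTERNS:
        key = key.replace(src, dst)
    return key

print(rename_key("encoder/layer_0/kernel"))  # encoder.layers.0.weight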
'''simple docstring''' from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def a__ ( a__ ): """simple docstring""" return {key.lstrip("""-""" ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = ArgumentParser( """HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=a__ ) __SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""datasets-cli command helpers""" ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(a__ ) EnvironmentCommand.register_subcommand(a__ ) TestCommand.register_subcommand(a__ ) RunBeamCommand.register_subcommand(a__ ) DummyDataCommand.register_subcommand(a__ ) # Parse args __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_known_args() if not hasattr(a__ , """func""" ): parser.print_help() exit(1 ) __SCREAMING_SNAKE_CASE = parse_unknown_args(a__ ) # Run __SCREAMING_SNAKE_CASE = args.func(a__ , **a__ ) service.run() if __name__ == "__main__": main()
331
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} if prompt is not None: __SCREAMING_SNAKE_CASE = prompt if generate_kwargs is not None: __SCREAMING_SNAKE_CASE = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __SCREAMING_SNAKE_CASE = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __SCREAMING_SNAKE_CASE = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __SCREAMING_SNAKE_CASE = self.model.config.model_type if model_type == "git": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __SCREAMING_SNAKE_CASE = None return model_inputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __SCREAMING_SNAKE_CASE = None if generate_kwargs is None: __SCREAMING_SNAKE_CASE = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name ) __SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs: __SCREAMING_SNAKE_CASE = { """generated_text""": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE ) return records
331
1
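(Aside: the `parse_unknown_args` helper in the CLI above pairs leftover argv tokens by zipping the even-indexed slice with the odd-indexed slice. A quick standalone check of the idiom, with made-up flag names:)

def parse_unknown_args(unknown_args):
    # ["--num_proc", "4", "--cache_dir", "/tmp"] -> {"num_proc": "4", "cache_dir": "/tmp"}
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}

print(parse_unknown_args(["--num_proc", "4", "--cache_dir", "/tmp"]))
# {'num_proc': '4', 'cache_dir': '/tmp'}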
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: UpperCAmelCase : Tuple = None UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : List[str] = { 'vocab_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model', }, 'tokenizer_file': { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json', }, } UpperCAmelCase : int = { 'xlnet-base-cased': None, 'xlnet-large-cased': None, } UpperCAmelCase : Tuple = '▁' # Segments (not really needed) UpperCAmelCase : str = 0 UpperCAmelCase : Union[str, Any] = 1 UpperCAmelCase : Dict = 2 UpperCAmelCase : Optional[Any] = 3 UpperCAmelCase : int = 4 class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = "left" lowerCAmelCase__ = XLNetTokenizer def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[Any]="<s>" , __SCREAMING_SNAKE_CASE : int="</s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="<unk>" , __SCREAMING_SNAKE_CASE : Tuple="<sep>" , __SCREAMING_SNAKE_CASE : Dict="<pad>" , __SCREAMING_SNAKE_CASE : Any="<cls>" , __SCREAMING_SNAKE_CASE : List[str]="<mask>" , __SCREAMING_SNAKE_CASE : Tuple=["<eop>", "<eod>"] , **__SCREAMING_SNAKE_CASE : str , ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token super().__init__( vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = 3 __SCREAMING_SNAKE_CASE = do_lower_case __SCREAMING_SNAKE_CASE = remove_space __SCREAMING_SNAKE_CASE = keep_accents __SCREAMING_SNAKE_CASE = vocab_file __SCREAMING_SNAKE_CASE = False if not self.vocab_file else True def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCAmelCase__ ( self : int , 
__SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = [self.sep_token_id] __SCREAMING_SNAKE_CASE = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return __SCREAMING_SNAKE_CASE = os.path.join( __SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ): copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE ) return (out_vocab_file,)
331
'''simple docstring''' def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = len(a__ ) while cur > 1: # Find the maximum number in arr __SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi __SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(a__ )] # Reverse whole list __SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(a__ )] cur -= 1 return arr if __name__ == "__main__": UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase : str = [int(item) for item in user_input.split(',')] print(pancake_sort(unsorted))
331
1
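(Aside: with the scrambled identifiers restored, the pancake sort above repeatedly flips the current maximum to the front and then flips the whole unsorted prefix so that maximum lands in its final slot. A cleaned-up, runnable sketch:)

def pancake_sort(arr: list) -> list:
    cur = len(arr)
    while cur > 1:
        mi = arr.index(max(arr[:cur]))        # largest value still unsorted
        arr = arr[mi::-1] + arr[mi + 1 :]     # flip it to the front
        arr = arr[cur - 1 :: -1] + arr[cur:]  # flip the prefix; max lands at cur - 1
        cur -= 1
    return arr

print(pancake_sort([3, 1, 2]))  # [1, 2, 3]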
'''simple docstring''' from sklearn.metrics import mean_squared_error import datasets UpperCAmelCase : Any = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' UpperCAmelCase : List[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' UpperCAmelCase : Any = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): """simple docstring""" def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html""" ] , ) def UpperCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("""float""" ) ), "references": datasets.Sequence(datasets.Value("""float""" ) ), } else: return { "predictions": datasets.Value("""float""" ), "references": datasets.Value("""float""" ), } def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any="uniform_average" , __SCREAMING_SNAKE_CASE : Any=True ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = mean_squared_error( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE , multioutput=__SCREAMING_SNAKE_CASE , squared=__SCREAMING_SNAKE_CASE ) return {"mse": mse}
331
'''simple docstring''' import os # Precomputes a list of the 100 first triangular numbers UpperCAmelCase : int = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = os.path.dirname(os.path.realpath(a__ ) ) __SCREAMING_SNAKE_CASE = os.path.join(a__ , """words.txt""" ) __SCREAMING_SNAKE_CASE = """""" with open(a__ ) as f: __SCREAMING_SNAKE_CASE = f.readline() __SCREAMING_SNAKE_CASE = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] __SCREAMING_SNAKE_CASE = [ word for word in [sum(ord(a__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(a__ ) if __name__ == "__main__": print(solution())
331
1
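(Aside: the Project Euler solution above relies on `ord(ch) - 64` mapping "A".."Z" to 1..26, and on a precomputed set of the first 100 triangular numbers n * (n + 1) / 2. A worked standalone check:)

TRIANGULAR_NUMBERS = {n * (n + 1) // 2 for n in range(1, 101)}

def word_value(word: str) -> int:
    # ord("A") == 65, so ord(ch) - 64 maps A -> 1, ..., Z -> 26
    return sum(ord(ch) - 64 for ch in word)

print(word_value("SKY"))  # 19 + 11 + 25 = 55, the 10th triangular number
print(word_value("SKY") in TRIANGULAR_NUMBERS)  # True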
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : Tuple = { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json' ), } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "dpr" def __init__( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=30_522 , __SCREAMING_SNAKE_CASE : Dict=768 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : Optional[int]=3_072 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : str=512 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Dict=1E-12 , __SCREAMING_SNAKE_CASE : Optional[int]=0 , __SCREAMING_SNAKE_CASE : Optional[Any]="absolute" , __SCREAMING_SNAKE_CASE : int = 0 , **__SCREAMING_SNAKE_CASE : Optional[Any] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = projection_dim __SCREAMING_SNAKE_CASE = position_embedding_type
331
'''simple docstring''' class lowerCAmelCase__ : # Public class to implement a graph """simple docstring""" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = row __SCREAMING_SNAKE_CASE = col __SCREAMING_SNAKE_CASE = graph def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> bool: """simple docstring""" return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[list[bool]] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __SCREAMING_SNAKE_CASE = [-1, 0, 1, -1, 1, -1, 0, 1] __SCREAMING_SNAKE_CASE = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> int: # And finally, count all islands. """simple docstring""" __SCREAMING_SNAKE_CASE = [[False for j in range(self.COL )] for i in range(self.ROW )] __SCREAMING_SNAKE_CASE = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) count += 1 return count
331
1
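(Aside: the graph class above counts islands with an 8-directional flood fill — the scrambled `diffs` method is the DFS. A compact standalone version with a worked grid, for illustration:)

def count_islands(grid: list) -> int:
    rows, cols = len(grid), len(grid[0])
    visited = [[False] * cols for _ in range(rows)]

    def dfs(i: int, j: int) -> None:
        if not (0 <= i < rows and 0 <= j < cols) or visited[i][j] or not grid[i][j]:
            return
        visited[i][j] = True
        for di in (-1, 0, 1):  # all 8 neighbours
            for dj in (-1, 0, 1):
                if di or dj:
                    dfs(i + di, j + dj)

    count = 0
    for i in range(rows):
        for j in range(cols):
            if grid[i][j] and not visited[i][j]:
                dfs(i, j)
                count += 1
    return count

print(count_islands([[1, 1, 0], [0, 0, 0], [0, 0, 1]]))  # 2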
'''simple docstring''' import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=a ) class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) lowerCAmelCase__ = Features({"text": Value("string" )} ) lowerCAmelCase__ = Features({"labels": ClassLabel} ) lowerCAmelCase__ = "text" lowerCAmelCase__ = "labels" def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: """simple docstring""" if self.label_column not in features: raise ValueError(f'Column {self.label_column} is not present in features.' ) if not isinstance(features[self.label_column] , __SCREAMING_SNAKE_CASE ): raise ValueError(f'Column {self.label_column} is not a ClassLabel.' ) __SCREAMING_SNAKE_CASE = copy.deepcopy(self ) __SCREAMING_SNAKE_CASE = self.label_schema.copy() __SCREAMING_SNAKE_CASE = features[self.label_column] __SCREAMING_SNAKE_CASE = label_schema return task_template @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict[str, str]: """simple docstring""" return { self.text_column: "text", self.label_column: "labels", }
331
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_choices def UpperCAmelCase__ ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_attention_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self ) @slow def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_000 __SCREAMING_SNAKE_CASE = (1, 6, vocab_size) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
1
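(Aside: the text-classification task template in this row only specializes its schema when the label column is a genuine `ClassLabel`. A small sketch of the feature types it expects — the column names mirror the template's defaults, and this is illustrative rather than a test of the real class:)

from datasets import ClassLabel, Features, Value

features = Features(
    {
        "text": Value("string"),
        "labels": ClassLabel(names=["negative", "positive"]),
    }
)

# the template's guard clauses, spelled out
assert "labels" in features
assert isinstance(features["labels"], ClassLabel)
print(features["labels"].num_classes)  # 2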
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : Tuple = logging.get_logger(__name__) def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __SCREAMING_SNAKE_CASE = 1_28 elif "12-12" in model_name: __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = 12 elif "14-14" in model_name: __SCREAMING_SNAKE_CASE = 14 __SCREAMING_SNAKE_CASE = 14 elif "16-16" in model_name: __SCREAMING_SNAKE_CASE = 16 __SCREAMING_SNAKE_CASE = 16 else: raise ValueError("""Model not supported""" ) __SCREAMING_SNAKE_CASE = 'huggingface/label-files' if "speech-commands" in model_name: __SCREAMING_SNAKE_CASE = 35 __SCREAMING_SNAKE_CASE = 'speech-commands-v2-id2label.json' else: __SCREAMING_SNAKE_CASE = 5_27 __SCREAMING_SNAKE_CASE = 'audioset-id2label.json' __SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) ) __SCREAMING_SNAKE_CASE = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE = idalabel __SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} return config def a__ ( a__ ): """simple docstring""" if "module.v" in name: __SCREAMING_SNAKE_CASE = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: __SCREAMING_SNAKE_CASE = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: __SCREAMING_SNAKE_CASE = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: __SCREAMING_SNAKE_CASE = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: __SCREAMING_SNAKE_CASE = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __SCREAMING_SNAKE_CASE = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: __SCREAMING_SNAKE_CASE = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: __SCREAMING_SNAKE_CASE = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def a__ ( a__ , a__ ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE = orig_state_dict.pop(_UpperCAmelCase ) if "qkv" in key: __SCREAMING_SNAKE_CASE = key.split(""".""" ) __SCREAMING_SNAKE_CASE = int(key_split[3] ) __SCREAMING_SNAKE_CASE = 
config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE = val[:dim, :] __SCREAMING_SNAKE_CASE = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE = val[-dim:, :] else: __SCREAMING_SNAKE_CASE = val[:dim] __SCREAMING_SNAKE_CASE = val[dim : dim * 2] __SCREAMING_SNAKE_CASE = val[-dim:] else: __SCREAMING_SNAKE_CASE = val return orig_state_dict def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = [ 'module.v.head.weight', 'module.v.head.bias', 'module.v.head_dist.weight', 'module.v.head_dist.bias', ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) @torch.no_grad() def a__ ( a__ , a__ , a__=False ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_audio_spectrogram_transformer_config(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = { 'ast-finetuned-audioset-10-10-0.4593': ( 'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.450': ( 'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448': ( 'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1' ), 'ast-finetuned-audioset-10-10-0.448-v2': ( 'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1' ), 'ast-finetuned-audioset-12-12-0.447': ( 'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1' ), 'ast-finetuned-audioset-14-14-0.443': ( 'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1' ), 'ast-finetuned-audioset-16-16-0.442': ( 'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1' ), 'ast-finetuned-speech-commands-v2': ( 'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1' ), } # load original state_dict __SCREAMING_SNAKE_CASE = model_name_to_url[model_name] __SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="""cpu""" ) # remove some keys remove_keys(_UpperCAmelCase ) # rename some keys __SCREAMING_SNAKE_CASE = convert_state_dict(_UpperCAmelCase , _UpperCAmelCase ) # load 🤗 model __SCREAMING_SNAKE_CASE = ASTForAudioClassification(_UpperCAmelCase ) model.eval() model.load_state_dict(_UpperCAmelCase ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __SCREAMING_SNAKE_CASE = -4.2_677_393 if 'speech-commands' not in model_name else -6.845_978 __SCREAMING_SNAKE_CASE = 4.5_689_974 if 'speech-commands' not in model_name else 5.5_654_526 __SCREAMING_SNAKE_CASE = 10_24 if 'speech-commands' not in model_name else 1_28 __SCREAMING_SNAKE_CASE = ASTFeatureExtractor(mean=_UpperCAmelCase , std=_UpperCAmelCase , max_length=_UpperCAmelCase ) if "speech-commands" in model_name: __SCREAMING_SNAKE_CASE = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) __SCREAMING_SNAKE_CASE = dataset[0]['audio']['array'] else: __SCREAMING_SNAKE_CASE = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) __SCREAMING_SNAKE_CASE = torchaudio.load(_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = waveform.squeeze().numpy() __SCREAMING_SNAKE_CASE = feature_extractor(_UpperCAmelCase , sampling_rate=1_60_00 , return_tensors="""pt""" ) # forward pass __SCREAMING_SNAKE_CASE = model(**_UpperCAmelCase ) __SCREAMING_SNAKE_CASE = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __SCREAMING_SNAKE_CASE = torch.tensor([-0.8_760, -7.0_042, -8.6_602] ) elif model_name 
== "ast-finetuned-audioset-10-10-0.450": __SCREAMING_SNAKE_CASE = torch.tensor([-1.1_986, -7.0_903, -8.2_718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __SCREAMING_SNAKE_CASE = torch.tensor([-2.6_128, -8.0_080, -9.4_344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __SCREAMING_SNAKE_CASE = torch.tensor([-1.5_080, -7.4_534, -8.8_917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __SCREAMING_SNAKE_CASE = torch.tensor([-0.5_050, -6.5_833, -8.0_843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __SCREAMING_SNAKE_CASE = torch.tensor([-0.3_826, -7.0_336, -8.2_413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __SCREAMING_SNAKE_CASE = torch.tensor([-1.2_113, -6.9_101, -8.3_470] ) elif model_name == "ast-finetuned-speech-commands-v2": __SCREAMING_SNAKE_CASE = torch.tensor([6.1_589, -8.0_566, -8.7_984] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ): raise ValueError("""Logits don\'t match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(_UpperCAmelCase ) print(F'Saving feature extractor to {pytorch_dump_folder_path}' ) feature_extractor.save_pretrained(_UpperCAmelCase ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(F'MIT/{model_name}' ) feature_extractor.push_to_hub(F'MIT/{model_name}' ) if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='ast-finetuned-audioset-10-10-0.4593', type=str, help='Name of the Audio Spectrogram Transformer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase : Dict = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
350
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
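To make the xpath-specific knobs concrete, a short sketch assuming a transformers build that ships MarkupLM:

from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=25, xpath_unit_hidden_size=16)
# every DOM node on the path to a token contributes one (tag, subscript) pair;
# the model embeds up to max_depth such pairs per token
print(config.max_depth, config.tag_pad_id, config.subs_pad_id)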
331
0
'''simple docstring''' import gc import unittest from transformers import CTRLConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, ) class lowerCAmelCase__ : """simple docstring""" def __init__( self : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=14 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : str=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : Dict=37 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Any=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=2 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : List[Any]=None , ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = use_mc_token_ids __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope __SCREAMING_SNAKE_CASE = self.vocab_size - 1 def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_mc_token_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.num_choices] , self.seq_length ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , 
self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, token_type_ids, mc_token_ids, sequence_labels, token_labels, choice_labels, ) def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return CTRLConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , *__SCREAMING_SNAKE_CASE : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = CTRLModel(config=__A ) model.to(__A ) model.eval() model(__A , token_type_ids=__A , head_mask=__A ) model(__A , token_type_ids=__A ) __SCREAMING_SNAKE_CASE = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(len(result.past_key_values ) , config.n_layer ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , *__SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = CTRLLMHeadModel(__A ) model.to(__A ) model.eval() __SCREAMING_SNAKE_CASE = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( __SCREAMING_SNAKE_CASE ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask} return config, inputs_dict def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , *__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = CTRLForSequenceClassification(__A ) model.to(__A ) model.eval() __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = model(__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) @require_torch class lowerCAmelCase__ ( A__ , A__ , A__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else () lowerCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else () lowerCAmelCase__ = ( { "feature-extraction": CTRLModel, "text-classification": CTRLForSequenceClassification, "text-generation": CTRLLMHeadModel, "zero-shot": CTRLForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , 
__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[Any]: """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny # config could not be created. return True return False def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = CTRLModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__A , n_embd=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_ctrl_model(*__A ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*__A ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = CTRLModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :) def UpperCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" pass @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() torch.cuda.empty_cache() @slow def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = CTRLLMHeadModel.from_pretrained("""ctrl""" ) model.to(__A ) __SCREAMING_SNAKE_CASE = torch.tensor( [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=__A ) # Legal the president is __SCREAMING_SNAKE_CASE = [ 11_859, 0, 1_611, 8, 5, 150, 26_449, 2, 19, 348, 469, 3, 2_595, 48, 20_740, 246_533, 246_533, 19, 30, 5, ] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a __SCREAMING_SNAKE_CASE = model.generate(__A , do_sample=__A ) self.assertListEqual(output_ids[0].tolist() , __A )
351
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
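_LazyModule postpones the heavy framework imports until a symbol is first accessed. A toy version of the same trick for a package __init__.py, using PEP 562 module __getattr__ (a sketch, not the transformers implementation):

# inside a package's __init__.py -- not the real _LazyModule
import importlib

_import_structure = {"configuration_reformer": ["ReformerConfig"]}

def __getattr__(name):
    # called only when a normal attribute lookup fails, i.e. first access
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")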
331
0
'''simple docstring'''
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class lowerCAmelCase__(unittest.TestCase):
    """simple docstring"""

    def test_args_convert(self):
        """simple docstring"""
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
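A rough re-implementation of the happy path of _convert_nargs_to_dict, to make the expected conversions concrete (illustrative only; the real helper is stricter, e.g. about the malformed flag mix in fail_training_script_args):

def convert_nargs_to_dict(raw_args):
    result, key = {}, None
    for token in raw_args:
        if token.startswith("--"):
            if key is not None:
                result[key] = True  # previous key was a bare flag
            key = token[2:]
            continue
        if key is None:
            raise ValueError(f"value {token!r} is not preceded by a --key")
        value = token
        if value in ("True", "False"):
            value = value == "True"
        else:
            for cast in (int, float):
                try:
                    value = cast(value)
                    break
                except ValueError:
                    pass
        result[key], key = value, None
    if key is not None:
        result[key] = True
    return result

print(convert_nargs_to_dict(["--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3", "--max_steps", "50.5"]))
# {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3, 'max_steps': 50.5}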
352
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
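What DisjunctiveConstraint tracks internally is a trie over the allowed branches; a toy matcher showing why [1, 2, 4, 5] completes while [1, 2] does not (a sketch of the idea, not the transformers code):

def build_trie(branches):
    root = {}
    for seq in branches:
        node = root
        for tok in seq:
            node = node.setdefault(tok, {})
    return root

trie = build_trie([[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]])
node = trie
for tok in (1, 2, 4, 5):
    node = node[tok]   # advancing by one generated token
print(node == {})      # True: an empty node means a full branch is completed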
331
0
'''simple docstring'''
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """simple docstring"""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
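A quick self-contained smoke test of the updater against a throwaway file (the demo shadows the module-level path constant, which the function reads at call time):

import pathlib
import tempfile

demo = pathlib.Path(tempfile.mkdtemp()) / "custom.js"
demo.write_text(
    'const stableVersion = "v4.27.0"\n'
    "const versionMapping = {\n"
    '    "v4.27.0": "v4.27.0",\n'
    "}\n",
    encoding="utf-8",
)

CUSTOM_JS_FILE = str(demo)  # shadow the constant for the demo only
update_custom_js("4.28.0")
print(demo.read_text(encoding="utf-8"))
# const stableVersion = "v4.28.0" ... plus a new "v4.28.0" mapping entry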
353
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = relative_attention __SCREAMING_SNAKE_CASE = position_biased_input __SCREAMING_SNAKE_CASE = pos_att_type __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
331
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } __SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase__ ( self : Dict , **__SCREAMING_SNAKE_CASE : Tuple ) -> Optional[int]: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]: """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: """simple docstring""" return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase__ ( self : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE = self.get_image_processor() __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , 
tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) __SCREAMING_SNAKE_CASE = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_image_processor() __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE = image_processor(_lowerCamelCase , return_tensors="""np""" ) __SCREAMING_SNAKE_CASE = processor(images=_lowerCamelCase , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_image_processor() __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = '''lower newer''' __SCREAMING_SNAKE_CASE = processor(text=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer(_lowerCamelCase , padding="""max_length""" , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_image_processor() __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = '''lower newer''' __SCREAMING_SNAKE_CASE = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_image_processor() __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = 
AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __SCREAMING_SNAKE_CASE = processor.batch_decode(_lowerCamelCase ) __SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_image_processor() __SCREAMING_SNAKE_CASE = self.get_tokenizer() __SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) __SCREAMING_SNAKE_CASE = '''lower newer''' __SCREAMING_SNAKE_CASE = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
354
'''simple docstring'''
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    """simple docstring"""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """simple docstring"""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    """simple docstring"""
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
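As a cross-check, the first-order entropy can be computed in a few lines straight from the definition H = -sum(p * log2(p)):

from collections import Counter
import math

text = "the quick brown fox"
counts = Counter(text)
total = sum(counts.values())
h1 = -sum(c / total * math.log2(c / total) for c in counts.values())
print(f"{h1:.3f} bits per character")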
331
0
'''simple docstring'''
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """simple docstring"""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        """simple docstring"""
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
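The heart of the forward pass is the final residual blend of the two per-condition outputs; the same arithmetic in isolation, with shapes invented for the demo:

import torch

input_states = torch.randn(1, 4, 8)  # (batch, tokens, features)
residuals = [torch.randn_like(input_states) for _ in range(2)]  # one residual per condition
mix_ratio = 0.5

output = residuals[0] * mix_ratio + residuals[1] * (1 - mix_ratio) + input_states
print(output.shape)  # torch.Size([1, 4, 8])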
355
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def a__ ( a__ ): """simple docstring""" return x + 2 class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) __SCREAMING_SNAKE_CASE = """x = y""" __SCREAMING_SNAKE_CASE = {"""y""": 5} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} ) def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = add_two(x)""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3\ny = 5""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. 
assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} ) __SCREAMING_SNAKE_CASE = {"""x""": 8} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = x""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} ) def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE ) assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
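The evaluate helper under test is essentially a restricted AST walker. A stripped-down evaluator in the same spirit, handling only assignments, names, constants and calls to whitelisted tools (a sketch, not the transformers implementation):

import ast

def tiny_evaluate(code, tools, state):
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            value = _eval(node.value, tools, state)
            for target in node.targets:
                state[target.id] = value
            result = value
        elif isinstance(node, ast.Expr):
            result = _eval(node.value, tools, state)
    return result  # like evaluate, returns the last computed value

def _eval(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id]
    if isinstance(node, ast.Call):
        func = tools[node.func.id]  # only whitelisted tools are callable
        return func(*[_eval(arg, tools, state) for arg in node.args])
    raise ValueError(f"unsupported node: {ast.dump(node)}")

state = {"x": 3}
print(tiny_evaluate("y = add_two(x)", {"add_two": lambda v: v + 2}, state))  # 5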
331
0
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
          Haberland, Matt and Reddy, Tyler and Cournapeau, David and
          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
          Kern, Robert and Larson, Eric and Carey, C J and
          Polat, Ilhan and Feng, Yu and Moore, Eric W. and
          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
          Harris, Charles R. and Archibald, Anne M. and
          Ribeiro, Antonio H. and Pedregosa, Fabian and
          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
          Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
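# The metric is a thin wrapper over SciPy; the numbers from the docstring
# example can be reproduced directly:
from scipy.stats import pearsonr

corr, p_value = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(corr, 2))     # -0.74
print(round(p_value, 2))  # 0.15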
356
import os


def solution(filename: str = "input.txt") -> int:
    # Read the matrix, one comma-separated row per line.
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
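# The same column-by-column dynamic programme, inlined on a small matrix so
# the three relaxation sweeps (right, down, up) are easy to trace:
def min_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # enter anywhere in column 0
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # move right
        for i in range(1, rows):  # relax downward moves
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # relax upward moves
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)  # exit anywhere in the last column

print(min_path_sum([[1, 9, 1], [1, 9, 1], [1, 1, 1]]))  # 3 (down the left edge, across the bottom)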
331
0
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
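# A hedged sketch of running the benchmark utility outside the test suite.
# It assumes network access to download the tiny checkpoint, and note that
# this benchmark utility is deprecated in recent transformers releases.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(args).run()
print(results.time_inference_result)
print(results.memory_inference_result)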
357
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info

from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"


class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading

        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")


class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)


class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))


def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfuly executed!")
    return trainer
331
0
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
358
import torch

from diffusers import DDPMScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
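# A minimal ancestral-sampling loop with the scheduler under test. The zero
# "residual" below is a stand-in assumption; a real pipeline would call a
# trained UNet instead of torch.zeros_like.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
scheduler.set_timesteps(50)

sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])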
331
0
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
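# Worked example on a strictly diagonally dominant 2x2 system:
#     4x +  y =  2
#      x + 3y = -6
# whose exact solution is x = 12/11 (about 1.0909) and y = -26/11 (about -2.3636);
# the iterates converge towards it.
import numpy as np

coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
constant = np.array([[2.0], [-6.0]])
print(jacobi_iteration_method(coefficient, constant, init_val=[0, 0], iterations=25))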
359
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
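# Small usage sketch of the classes above: build an undirected weighted graph
# and run Prim's algorithm; the first vertex pushed acts as the start vertex.
graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "a", 5)
graph.add_edge("c", "d", 1)

dist, parent = prims_algo(graph)
print(parent)  # {'a': None, 'b': 'a', 'c': 'a', 'd': 'c'} -> spanning-tree parents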
331
0
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
360
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
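# Because the function uses cmath.sqrt, a negative discriminant yields complex
# roots instead of raising; for x^2 + 2x + 5 the discriminant is -16:
print(quadratic_roots(a=1, b=2, c=5))  # ((-1+2j), (-1-2j))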
331
0
import json
import os
import unittest

from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors


@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1_024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1_024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
361
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) # TODO: upload to AWS UpperCAmelCase : Optional[int] = { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json' ), } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "retribert" def __init__( self : int , __SCREAMING_SNAKE_CASE : str=30_522 , __SCREAMING_SNAKE_CASE : int=768 , __SCREAMING_SNAKE_CASE : Any=8 , __SCREAMING_SNAKE_CASE : List[str]=12 , __SCREAMING_SNAKE_CASE : List[str]=3_072 , __SCREAMING_SNAKE_CASE : int="gelu" , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1E-12 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Any=128 , __SCREAMING_SNAKE_CASE : Tuple=0 , **__SCREAMING_SNAKE_CASE : Tuple , ) -> Any: """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = share_encoders __SCREAMING_SNAKE_CASE = projection_dim
331
0
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black UpperCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. UpperCAmelCase : List[str] = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n' class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) ) __SCREAMING_SNAKE_CASE = self.transformer_dir shutil.copy( os.path.join(__SCREAMING_SNAKE_CASE , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , ) def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = 'src/transformers' shutil.rmtree(self.transformer_dir ) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = comment + f'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: __SCREAMING_SNAKE_CASE = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result __SCREAMING_SNAKE_CASE = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) __SCREAMING_SNAKE_CASE = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = os.path.join(self.transformer_dir , """new_code.py""" ) with open(__SCREAMING_SNAKE_CASE , """w""" , newline="""\n""" ) as f: f.write(__SCREAMING_SNAKE_CASE ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__SCREAMING_SNAKE_CASE ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: self.assertTrue(f.read() , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> str: """simple docstring""" self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" 
, """BertLMPredictionHead""" , __SCREAMING_SNAKE_CASE , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with a really long name __SCREAMING_SNAKE_CASE = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , f'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __SCREAMING_SNAKE_CASE , overwrite_result=re.sub("""Bert""" , """TestModel""" , __SCREAMING_SNAKE_CASE ) , ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = check_copies.LOCALIZED_READMES['README_zh-hans.md'] __SCREAMING_SNAKE_CASE = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the' ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for' ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong' ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.' ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),' ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and' ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same' ' method has been applied to compress GPT2 into' ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into' ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),' ' Multilingual BERT into' ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German' ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**' ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders' ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang' ' Luong, Quoc V. Le, Christopher D. Manning.' ) __SCREAMING_SNAKE_CASE = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) __SCREAMING_SNAKE_CASE = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.' 
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文' ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and' ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same' ' method has been applied to compress GPT2 into' ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into' ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),' ' Multilingual BERT into' ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German' ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自' ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather' ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,' ' Christopher D. Manning 发布。\n' ) __SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] ) self.assertFalse(__SCREAMING_SNAKE_CASE ) self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] ) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the' ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for' ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong' ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.' ) __SCREAMING_SNAKE_CASE = ( '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and' ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) __SCREAMING_SNAKE_CASE = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) __SCREAMING_SNAKE_CASE = check_copies.convert_to_localized_md( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , localized_readme["""format_model_list"""] ) # Check if the model link is synchronized. self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
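# Hedged sketch of the mechanism the copy-consistency tests above exercise
# (illustrative names, not the actual utils/check_copies.py): a
# "# Copied from ... with A->B" marker means "the code below must equal the
# referenced source after the rename is applied".
import re

COPY_MARKER = re.compile(r"^# Copied from (\S+)(?: with (\w+)->(\w+))?")

def expected_copy(marker_line, reference_source):
    target, old, new = COPY_MARKER.match(marker_line).groups()
    return reference_source if old is None else reference_source.replace(old, new)

# expected_copy("# Copied from x.y.BertLMPredictionHead with Bert->TestModel",
#               "class BertLMPredictionHead: ...")
# -> "class TestModelLMPredictionHead: ..."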
362
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = AltDiffusionPipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) __SCREAMING_SNAKE_CASE = 77 __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, 
"""num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A photo of an astronaut""" __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE 
) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
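# The pipeline tests above pin randomness with an explicit torch.Generator;
# a minimal standalone check of that pattern (torch only, no diffusers needed):
import torch

g_a = torch.Generator(device="cpu").manual_seed(0)
g_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(3, generator=g_a), torch.randn(3, generator=g_b))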
331
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase : Optional[Any] = { 'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'], 'tokenization_mvp': ['MvpTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = ['MvpTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'MVP_PRETRAINED_MODEL_ARCHIVE_LIST', 'MvpForCausalLM', 'MvpForConditionalGeneration', 'MvpForQuestionAnswering', 'MvpForSequenceClassification', 'MvpModel', 'MvpPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
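# Rough sketch of what the _LazyModule indirection above buys (an illustration
# under assumptions, not transformers' real implementation): importing the
# package stays cheap, and the heavy submodule import runs on first attribute
# access.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol):
        if symbol not in self._symbol_to_module:
            raise AttributeError(symbol)
        module = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache, so later lookups skip __getattr__
        return value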
363
'''simple docstring''' import argparse import os import re import packaging.version UpperCAmelCase : Optional[int] = 'examples/' UpperCAmelCase : List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCAmelCase : Union[str, Any] = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } UpperCAmelCase : Tuple = 'README.md' def a__ ( a__ , a__ , a__ ): """simple docstring""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ ) __SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ ) with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(a__ ) def a__ ( a__ ): """simple docstring""" for folder, directories, fnames in os.walk(a__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" ) def a__ ( a__ , a__=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(a__ , a__ , a__ ) if not patch: update_version_in_examples(a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures""" __SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __SCREAMING_SNAKE_CASE = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(a__ ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0] return packaging.version.parse(a__ ) def a__ ( a__=False ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? 
[{default_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = default_version print(F'Updating version to {version}.' ) global_version_update(a__ , patch=a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() __SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0' __SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = dev_version print(F'Updating version to {version}.' ) global_version_update(a__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCAmelCase : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
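# Tiny sanity check of the version arithmetic above (assumes only the
# `packaging` dependency the script already imports; the version string is
# illustrative):
from packaging.version import parse

v = parse("0.19.0.dev0")
assert v.is_devrelease and v.base_version == "0.19.0"
# the post-release dev bump in the script builds f"{v.major}.{v.minor + 1}.0.dev0"
assert f"{v.major}.{v.minor + 1}.0.dev0" == "0.20.0.dev0"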
331
0
'''simple docstring'''


def partition(m):
    """simple docstring"""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(partition(n))
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print('Please pass a number.')
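# For comparison, a minimal self-contained partition-count DP (an assumed
# baseline for illustration, not derived from the memo recurrence above):
# ways[n] counts the partitions of n, built by admitting one part size at a
# time, unbounded-knapsack style.
def count_partitions(n):
    ways = [1] + [0] * n  # ways[0] = 1 for the empty sum
    for part in range(1, n + 1):
        for total in range(part, n + 1):
            ways[total] += ways[total - part]
    return ways[n]

# count_partitions(5) == 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
# count_partitions(10) == 42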
364
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return MraConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , 
__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 
).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE ) 
self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""MRA does not output attentions""" ) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
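# Standalone shape check of the unsqueeze/expand idiom the multiple-choice
# test above relies on (torch only; the sizes are illustrative):
import torch

batch, num_choices, seq_len = 4, 3, 7
input_ids = torch.zeros(batch, seq_len, dtype=torch.long)
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch, num_choices, seq_len)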
331
0
'''simple docstring'''


def a__(mass: float, velocity: float) -> float:
    """simple docstring"""
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
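# Quick numeric check of the formula above (pure Python): a 2 kg body at
# 3 m/s carries 0.5 * 2 * 3**2 = 9.0 J, and the sign of the velocity is
# irrelevant because the speed enters squared.
assert a__(2, 3) == 9.0
assert a__(2, -3) == 9.0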
365
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase__ = 10000 lowerCAmelCase__ = None lowerCAmelCase__ = None class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase__ = ParquetConfig def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) __SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ): __SCREAMING_SNAKE_CASE = data_files if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] __SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) ) return splits def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE = 
pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' ) raise
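# Minimal round trip showing the batched read the builder above performs
# (pyarrow only; the file name is illustrative):
import pyarrow as pa
import pyarrow.parquet as pq

pq.write_table(pa.table({"x": list(range(10))}), "tiny.parquet")
for batch in pq.ParquetFile("tiny.parquet").iter_batches(batch_size=4):
    table = pa.Table.from_batches([batch])  # same per-batch conversion point as above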
331
0
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class lowerCAmelCase__ : """simple docstring""" lowerCAmelCase__ = MBartConfig lowerCAmelCase__ = {} lowerCAmelCase__ = "gelu" def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]=13 , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=37 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : List[str]=0.1 , __SCREAMING_SNAKE_CASE : List[Any]=20 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : List[Any]=1 , __SCREAMING_SNAKE_CASE : int=0 , ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = bos_token_id def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __SCREAMING_SNAKE_CASE = prepare_mbart_inputs_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return config, inputs_dict def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModel(config=__UpperCAmelCase ).get_decoder() 
__SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""] __SCREAMING_SNAKE_CASE = input_ids[:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :] __SCREAMING_SNAKE_CASE = inputs_dict["""head_mask"""] __SCREAMING_SNAKE_CASE = 1 # first forward pass __SCREAMING_SNAKE_CASE = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , head_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase ) __SCREAMING_SNAKE_CASE = outputs.to_tuple() __SCREAMING_SNAKE_CASE = past_key_values[1] def a__ ( a__ , a__ , a__ , a__=None , a__=None , a__=None , a__=None , a__=None , ): """simple docstring""" if attention_mask is None: __SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __SCREAMING_SNAKE_CASE = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowerCAmelCase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowerCAmelCase__ = ( { "conversational": TFMBartForConditionalGeneration, "feature-extraction": TFMBartModel, "summarization": TFMBartForConditionalGeneration, "text2text-generation": TFMBartForConditionalGeneration, "translation": TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]: """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFMBartModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__UpperCAmelCase ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = [ " UN Chief Says There Is No Military Solution in Syria", ] lowerCAmelCase__ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", ] lowerCAmelCase__ = "facebook/mbart-large-en-ro" @cached_property def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def UpperCAmelCase__ ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : List[str] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.translate_src_text(**__UpperCAmelCase ) self.assertListEqual(self.expected_text , __UpperCAmelCase ) def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text , **__UpperCAmelCase , return_tensors="""tf""" ) __SCREAMING_SNAKE_CASE = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) __SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase ) return generated_words @slow def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" self._assert_generated_batch_equal_expected()
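# Standalone illustration of the padding-mask construction used in
# prepare_mbart_inputs_dict above (TensorFlow only; 1 is the pad id here):
import tensorflow as tf

input_ids = tf.constant([[5, 6, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, 1), tf.int8)
# -> [[1, 1, 0, 0]]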
366
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase : Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] UpperCAmelCase : Optional[Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] UpperCAmelCase : Optional[int] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[str] = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def a__ ( a__ , a__ ): """simple docstring""" for tf_name, hf_name in patterns: __SCREAMING_SNAKE_CASE = k.replace(a__ , a__ ) return k def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ ) __SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ ) __SCREAMING_SNAKE_CASE = torch_model.state_dict() __SCREAMING_SNAKE_CASE = {} # separating decoder weights __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = DECODER_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = REMAINING_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""] __SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ ) __SCREAMING_SNAKE_CASE = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = ["""global_step"""] for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ): __SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name ) if skip_key: continue __SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ ) __SCREAMING_SNAKE_CASE = array return tf_weights def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ ) __SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase : int = parser.parse_args() UpperCAmelCase : Dict = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
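# Hypothetical invocation (the script name is a placeholder; the flags match the argparse definitions above):
# python convert_bigbird_pegasus_tf_to_pytorch.py --tf_ckpt_path ./bigbird-pegasus-ckpt --save_dir ./bigbird-pegasus-pt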
331
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = CycleDiffusionPipeline lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''} lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"} ) lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , num_train_timesteps=1_000 , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) __SCREAMING_SNAKE_CASE = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int=0 ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = image / 2 + 0.5 if str(lowerCAmelCase__ ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, "source_guidance_scale": 1, "output_type": "numpy", } return inputs def UpperCAmelCase__ ( self 
: int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = "cpu" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_dummy_components() for name, module in components.items(): if hasattr(lowerCAmelCase__ , """half""" ): __SCREAMING_SNAKE_CASE = module.half() __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" return super().test_save_load_local() @unittest.skip("""non-deterministic pipeline""" ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return super().test_inference_batch_single_identical() @skip_mps def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" return super().test_dict_tuple_outputs_equivalent() @skip_mps def UpperCAmelCase__ ( self : int ) -> Tuple: """simple docstring""" return super().test_save_load_optional_components() @skip_mps def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Tuple ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) __SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" ) __SCREAMING_SNAKE_CASE = init_image.resize((512, 512) ) __SCREAMING_SNAKE_CASE = "CompVis/stable-diffusion-v1-4" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained( lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa , revision="""fp16""" ) pipe.to(lowerCAmelCase__ ) 
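# Disabling the progress bar keeps test logs quiet; attention slicing trades a little speed for lower GPU memory use.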
pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE = "A black colored car" __SCREAMING_SNAKE_CASE = "A blue colored car" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=lowerCAmelCase__ , source_prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase__ , output_type="""np""" , ) __SCREAMING_SNAKE_CASE = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image ).max() < 5E-1 def UpperCAmelCase__ ( self : int ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/cycle-diffusion/black_colored_car.png""" ) __SCREAMING_SNAKE_CASE = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" ) __SCREAMING_SNAKE_CASE = init_image.resize((512, 512) ) __SCREAMING_SNAKE_CASE = "CompVis/stable-diffusion-v1-4" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = CycleDiffusionPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE = "A black colored car" __SCREAMING_SNAKE_CASE = "A blue colored car" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = pipe( prompt=lowerCAmelCase__ , source_prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowerCAmelCase__ , output_type="""np""" , ) __SCREAMING_SNAKE_CASE = output.images assert np.abs(image - expected_image ).max() < 2E-2
367
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} if prompt is not None: __SCREAMING_SNAKE_CASE = prompt if generate_kwargs is not None: __SCREAMING_SNAKE_CASE = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __SCREAMING_SNAKE_CASE = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __SCREAMING_SNAKE_CASE = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __SCREAMING_SNAKE_CASE = self.model.config.model_type if model_type == "git": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __SCREAMING_SNAKE_CASE = None return model_inputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __SCREAMING_SNAKE_CASE = None if generate_kwargs is None: __SCREAMING_SNAKE_CASE = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name ) __SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs: __SCREAMING_SNAKE_CASE = { """generated_text""": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE ) return records
331
0
'''simple docstring'''
def factorial(digit):
    """Return digit! computed recursively."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number):
    """Check whether the sum of the factorials of a number's digits equals the number itself."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.""")
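# Example: krishnamurthy(145) -> True, since 1! + 4! + 5! == 145; krishnamurthy(10) -> False.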
368
'''simple docstring'''
def pancake_sort(arr):
    """Sort a list using repeated prefix reversals (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, moving the maximum to its final slot
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
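# Example: pancake_sort([3, 1, 2]) -> [1, 2, 3]; each pass flips the prefix maximum to the front, then into place.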
331
0
'''simple docstring'''
import math


def prime_sieve(n):
    """Return all primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit=99_99_66_66_33_33):
    """Sum the numbers below limit whose only prime factors are one pair of consecutive primes."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_00
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
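# Example: prime_sieve(10) -> [2, 3, 5, 7].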
369
'''simple docstring'''
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)]


def solution():
    """Count the triangular words in words.txt (a word is triangular when its letter-value sum is a triangular number)."""
    script_directory = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_directory, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
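# Example: "SKY" scores 19 + 11 + 25 = 55, the tenth triangular number, so it is counted.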
331
0
'''simple docstring''' import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed UpperCAmelCase : List[str] = logging.getLogger(__name__) def a__ ( a__=2 , a__=3 , a__=16 , a__ = 10 , a__ = 2 ): def get_dataset(a__ ): __SCREAMING_SNAKE_CASE = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(__SCREAMING_SNAKE_CASE , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __SCREAMING_SNAKE_CASE = get_dataset(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = get_dataset(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , num_workers=4 ) __SCREAMING_SNAKE_CASE = DataLoader(__SCREAMING_SNAKE_CASE , shuffle=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , num_workers=4 ) return (train_dataloader, valid_dataloader) def a__ ( a__ , a__ , a__ , a__ , a__ , a__=None ): __SCREAMING_SNAKE_CASE = [] for epoch in range(__SCREAMING_SNAKE_CASE ): # Train quickly model.train() for batch in dataloader: __SCREAMING_SNAKE_CASE = batch __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.nn.functional.mse_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) accelerator.backward(__SCREAMING_SNAKE_CASE ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class lowerCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] ) -> Dict: """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) ) __SCREAMING_SNAKE_CASE = nn.Parameter(torch.randn(1 ) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: """simple docstring""" return x * self.a + self.b class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE = dummy_dataloaders() __SCREAMING_SNAKE_CASE = ProjectConfiguration(total_limit=1 , project_dir=_lowerCAmelCase , automatic_checkpoint_naming=_lowerCAmelCase ) # Train baseline __SCREAMING_SNAKE_CASE = Accelerator(project_config=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE = dummy_dataloaders() # Train baseline __SCREAMING_SNAKE_CASE = Accelerator() __SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save initial __SCREAMING_SNAKE_CASE = os.path.join(_lowerCAmelCase , 
"""initial""" ) accelerator.save_state(_lowerCAmelCase ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() __SCREAMING_SNAKE_CASE = train(3 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() # Train partially set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE = dummy_dataloaders() __SCREAMING_SNAKE_CASE = Accelerator() __SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) accelerator.load_state(_lowerCAmelCase ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) __SCREAMING_SNAKE_CASE = train(2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save everything __SCREAMING_SNAKE_CASE = os.path.join(_lowerCAmelCase , """checkpoint""" ) accelerator.save_state(_lowerCAmelCase ) # Load everything back in and make sure all states work accelerator.load_state(_lowerCAmelCase ) test_rands += train(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE = dummy_dataloaders() __SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=_lowerCAmelCase ) # Train baseline __SCREAMING_SNAKE_CASE = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save initial accelerator.save_state() (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() __SCREAMING_SNAKE_CASE = train(3 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() # Train partially set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE = dummy_dataloaders() __SCREAMING_SNAKE_CASE = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) accelerator.load_state(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) 
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) __SCREAMING_SNAKE_CASE = train(2 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_1""" ) ) test_rands += train(1 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) (__SCREAMING_SNAKE_CASE) = model.a.item(), model.b.item() __SCREAMING_SNAKE_CASE = optimizer.state_dict() self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.tensor([1, 2, 3] ) __SCREAMING_SNAKE_CASE = torch.tensor([2, 3, 4] ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(net.parameters() ) __SCREAMING_SNAKE_CASE = Accelerator() with self.assertRaises(_lowerCAmelCase ) as ve: accelerator.register_for_checkpointing(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) __SCREAMING_SNAKE_CASE = str(ve.exception ) self.assertTrue("""Item at index 0""" in message ) self.assertTrue("""Item at index 1""" in message ) self.assertFalse("""Item at index 2""" in message ) self.assertFalse("""Item at index 3""" in message ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __SCREAMING_SNAKE_CASE = torch.optim.lr_scheduler.StepLR(_lowerCAmelCase , step_size=1 , gamma=0.99 ) __SCREAMING_SNAKE_CASE = dummy_dataloaders() __SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=_lowerCAmelCase ) # Train baseline __SCREAMING_SNAKE_CASE = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Save initial accelerator.save_state() __SCREAMING_SNAKE_CASE = scheduler.state_dict() train(3 , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) self.assertNotEqual(_lowerCAmelCase , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) ) self.assertEqual(_lowerCAmelCase , scheduler.state_dict() ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __SCREAMING_SNAKE_CASE = DummyModel() __SCREAMING_SNAKE_CASE = ProjectConfiguration(automatic_checkpoint_naming=_lowerCAmelCase , total_limit=2 ) # Train baseline __SCREAMING_SNAKE_CASE = Accelerator(project_dir=_lowerCAmelCase , project_config=_lowerCAmelCase ) __SCREAMING_SNAKE_CASE = accelerator.prepare(_lowerCAmelCase ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_0""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase , """checkpoints""" , """checkpoint_9""" ) ) ) self.assertTrue(os.path.exists(os.path.join(_lowerCAmelCase 
, """checkpoints""" , """checkpoint_10""" ) ) ) @require_cuda def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(_lowerCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase : Any = '/tmp/accelerate/state_checkpointing' UpperCAmelCase : Dict = DummyModel() UpperCAmelCase : int = torch.optim.Adam(params=model.parameters(), lr=1e-3) UpperCAmelCase : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9) UpperCAmelCase , UpperCAmelCase : str = dummy_dataloaders() UpperCAmelCase : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline UpperCAmelCase : Any = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) UpperCAmelCase , UpperCAmelCase : Optional[int] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: UpperCAmelCase : str = group['params'][0].device break assert param_device.type == accelerator.device.type UpperCAmelCase : Optional[Any] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: UpperCAmelCase : Dict = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: UpperCAmelCase : List[Any] = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
370
'''simple docstring'''
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """A cell can be visited if it lies inside the grid, is unvisited, and is land."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first search over the 8 neighbouring cells."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
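# Example: Graph(1, 5, [[1, 0, 1, 0, 1]]).count_islands() -> 3 (three isolated land cells).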
331
0
'''simple docstring''' import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures UpperCAmelCase : int = logging.get_logger(__name__) @dataclass class lowerCAmelCase__ : """simple docstring""" lowerCAmelCase__ = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) lowerCAmelCase__ = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) lowerCAmelCase__ = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCAmelCase__ = field( default=_UpperCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.task_name.lower() class lowerCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" lowerCAmelCase__ = "train" lowerCAmelCase__ = "dev" lowerCAmelCase__ = "test" class lowerCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 lowerCAmelCase__ = 42 def __init__( self : str , __SCREAMING_SNAKE_CASE : GlueDataTrainingArguments , __SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Union[str, Split] = Split.train , __SCREAMING_SNAKE_CASE : Optional[str] = None , ) -> int: """simple docstring""" warnings.warn( """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """ """library. You can have a look at this example script for pointers: """ """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowercase_ , ) __SCREAMING_SNAKE_CASE = args __SCREAMING_SNAKE_CASE = glue_processors[args.task_name]() __SCREAMING_SNAKE_CASE = glue_output_modes[args.task_name] if isinstance(lowercase_ , lowercase_ ): try: __SCREAMING_SNAKE_CASE = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) # Load data features from cache or dataset file __SCREAMING_SNAKE_CASE = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}' , ) __SCREAMING_SNAKE_CASE = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __SCREAMING_SNAKE_CASE = label_list[2], label_list[1] __SCREAMING_SNAKE_CASE = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
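# A sibling ".lock" file guards the cache so that concurrent processes never write the features file at the same time.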
__SCREAMING_SNAKE_CASE = cached_features_file + """.lock""" with FileLock(lowercase_ ): if os.path.exists(lowercase_ ) and not args.overwrite_cache: __SCREAMING_SNAKE_CASE = time.time() __SCREAMING_SNAKE_CASE = torch.load(lowercase_ ) logger.info( f'Loading features from cached file {cached_features_file} [took %.3f s]' , time.time() - start ) else: logger.info(f'Creating features from dataset file at {args.data_dir}' ) if mode == Split.dev: __SCREAMING_SNAKE_CASE = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __SCREAMING_SNAKE_CASE = self.processor.get_test_examples(args.data_dir ) else: __SCREAMING_SNAKE_CASE = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __SCREAMING_SNAKE_CASE = examples[:limit_length] __SCREAMING_SNAKE_CASE = glue_convert_examples_to_features( lowercase_ , lowercase_ , max_length=args.max_seq_length , label_list=lowercase_ , output_mode=self.output_mode , ) __SCREAMING_SNAKE_CASE = time.time() torch.save(self.features , lowercase_ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' ) def __len__( self : List[Any] ) -> str: """simple docstring""" return len(self.features ) def __getitem__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" return self.features[i] def UpperCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" return self.label_list
371
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_choices def UpperCAmelCase__ ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_attention_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() 
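# Repack the prepared config and inputs into the keyword-argument dict the common Flax tests expect.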
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self ) @slow def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_000 __SCREAMING_SNAKE_CASE = (1, 6, vocab_size) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
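# _LazyModule defers the heavy torch imports above until one of the listed attributes is first accessed.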
350
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
    "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}


class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1_024,
        tag_pad_id=216,
        subs_pad_id=1_001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
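# Example: config = MarkupLMConfig() reproduces the markuplm-base defaults above, e.g. config.xpath_unit_hidden_size -> 32.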
331
0
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) UpperCAmelCase : List[Any] = parser.parse_args() UpperCAmelCase : Optional[Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
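# Hypothetical invocation (script name and paths are placeholders; the flags match the parser above):
# python convert_original_stable_diffusion_to_diffusers.py --checkpoint_path ./v1-5.ckpt --dump_path ./sd-diffusers --extract_ema --half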
351
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
331
0
'''simple docstring''' import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCAmelCase : List[Any] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. UpperCAmelCase : Optional[int] = direct_transformers_import(PATH_TO_TRANSFORMERS) UpperCAmelCase : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCAmelCase : Optional[int] = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)') UpperCAmelCase : Any = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = None # source code of `config_class` __SCREAMING_SNAKE_CASE = inspect.getsource(_snake_case ) __SCREAMING_SNAKE_CASE = _re_checkpoint.findall(_snake_case ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("""/""" ): __SCREAMING_SNAKE_CASE = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link __SCREAMING_SNAKE_CASE = F'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: __SCREAMING_SNAKE_CASE = ckpt_name break return checkpoint def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue __SCREAMING_SNAKE_CASE = get_checkpoint_from_config_class(_snake_case ) __SCREAMING_SNAKE_CASE = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(_snake_case ) if len(_snake_case ) > 0: __SCREAMING_SNAKE_CASE = '''\n'''.join(sorted(_snake_case ) ) raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
352
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
331
0
'''simple docstring''' def a__ ( a__ = 1_00 ): """simple docstring""" __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = n + 1 # maximum limit for a in range(2 , _A ): for b in range(2 , _A ): __SCREAMING_SNAKE_CASE = a**b # calculates the current power collect_powers.add(_A ) # adds the result to the set return len(_A ) if __name__ == "__main__": print('Number of terms ', solution(int(str(input()).strip())))
353
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = relative_attention __SCREAMING_SNAKE_CASE = position_biased_input __SCREAMING_SNAKE_CASE = pos_att_type __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
331
0
'''simple docstring''' import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( 'The `image_to_image.py` script is outdated. Please use `from diffusers import' ' StableDiffusionImg2ImgPipeline` directly instead.' )
354
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ ) __SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. __SCREAMING_SNAKE_CASE = sum(single_char_strings.values() ) # one length string __SCREAMING_SNAKE_CASE = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __SCREAMING_SNAKE_CASE = single_char_strings[ch] __SCREAMING_SNAKE_CASE = my_str / all_sum my_fir_sum += prob * math.loga(a__ ) # entropy formula. # print entropy print(F'{round(-1 * my_fir_sum ):.1f}' ) # two len string __SCREAMING_SNAKE_CASE = sum(two_char_strings.values() ) __SCREAMING_SNAKE_CASE = 0 # for each ordered pair of alphas (two in size) calculate entropy; the inner loop needs its own variable so all pairs are visited, not just doubled characters. for cha in my_alphas: for chb in my_alphas: __SCREAMING_SNAKE_CASE = cha + chb if sequence in two_char_strings: __SCREAMING_SNAKE_CASE = two_char_strings[sequence] __SCREAMING_SNAKE_CASE = int(a__ ) / all_sum my_sec_sum += prob * math.loga(a__ ) # print second entropy print(F'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = Counter() # type: ignore __SCREAMING_SNAKE_CASE = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(a__ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def a__ ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
331
0
'''simple docstring''' def a__ ( a__ , a__ , a__ ): """simple docstring""" def update_area_of_max_square(a__ , a__ ) -> int: # BASE CASE if row >= rows or col >= cols: return 0 __SCREAMING_SNAKE_CASE = update_area_of_max_square(snake_case_ , col + 1 ) __SCREAMING_SNAKE_CASE = update_area_of_max_square(row + 1 , col + 1 ) __SCREAMING_SNAKE_CASE = update_area_of_max_square(row + 1 , snake_case_ ) if mat[row][col]: __SCREAMING_SNAKE_CASE = 1 + min([right, diagonal, down] ) __SCREAMING_SNAKE_CASE = max(largest_square_area[0] , snake_case_ ) return sub_problem_sol else: return 0 __SCREAMING_SNAKE_CASE = [0] update_area_of_max_square(0 , 0 ) return largest_square_area[0] def a__ ( a__ , a__ , a__ ): """simple docstring""" def update_area_of_max_square_using_dp_array( a__ , a__ , a__ ) -> int: if row >= rows or col >= cols: return 0 if dp_array[row][col] != -1: return dp_array[row][col] __SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(snake_case_ , col + 1 , snake_case_ ) __SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , snake_case_ ) __SCREAMING_SNAKE_CASE = update_area_of_max_square_using_dp_array(row + 1 , snake_case_ , snake_case_ ) if mat[row][col]: __SCREAMING_SNAKE_CASE = 1 + min([right, diagonal, down] ) __SCREAMING_SNAKE_CASE = max(largest_square_area[0] , snake_case_ ) __SCREAMING_SNAKE_CASE = sub_problem_sol return sub_problem_sol else: return 0 __SCREAMING_SNAKE_CASE = [0] __SCREAMING_SNAKE_CASE = [[-1] * cols for _ in range(snake_case_ )] update_area_of_max_square_using_dp_array(0 , 0 , snake_case_ ) return largest_square_area[0] def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = [[0] * (cols + 1) for _ in range(rows + 1 )] __SCREAMING_SNAKE_CASE = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __SCREAMING_SNAKE_CASE = dp_array[row][col + 1] __SCREAMING_SNAKE_CASE = dp_array[row + 1][col + 1] __SCREAMING_SNAKE_CASE = dp_array[row + 1][col] if mat[row][col] == 1: __SCREAMING_SNAKE_CASE = 1 + min(snake_case_ , snake_case_ , snake_case_ ) __SCREAMING_SNAKE_CASE = max(dp_array[row][col] , snake_case_ ) else: __SCREAMING_SNAKE_CASE = 0 return largest_square_area def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = [0] * (cols + 1) __SCREAMING_SNAKE_CASE = [0] * (cols + 1) __SCREAMING_SNAKE_CASE = 0 for row in range(rows - 1 , -1 , -1 ): for col in range(cols - 1 , -1 , -1 ): __SCREAMING_SNAKE_CASE = current_row[col + 1] __SCREAMING_SNAKE_CASE = next_row[col + 1] __SCREAMING_SNAKE_CASE = next_row[col] if mat[row][col] == 1: __SCREAMING_SNAKE_CASE = 1 + min(snake_case_ , snake_case_ , snake_case_ ) __SCREAMING_SNAKE_CASE = max(current_row[col] , snake_case_ ) else: __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = current_row return largest_square_area if __name__ == "__main__": import doctest doctest.testmod() print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
355
'''simple docstring''' import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def a__ ( a__ ): """simple docstring""" return x + 2 class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) __SCREAMING_SNAKE_CASE = """x = y""" __SCREAMING_SNAKE_CASE = {"""y""": 5} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 5, """y""": 5} ) def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = add_two(x)""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) # Won't work without the tool with CaptureStdout() as out: __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3} ) def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 3\ny = 5""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 5} ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = """text = f'This is x: {x}.'""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """text""": """This is x: 3."""} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """if x <= 3:\n y = 2\nelse:\n y = 5""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. 
assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 2} ) __SCREAMING_SNAKE_CASE = {"""x""": 8} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 8, """y""": 5} ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) self.assertListEqual(__SCREAMING_SNAKE_CASE , [3, 5] ) self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """y = x""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {} , state=__SCREAMING_SNAKE_CASE ) assert result == 3 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """y""": 3} ) def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """test_list = [x, add_two(x)]\ntest_list[1]""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_list""": [3, 5]} ) __SCREAMING_SNAKE_CASE = """test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']""" __SCREAMING_SNAKE_CASE = {"""x""": 3} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""add_two""": add_two} , state=__SCREAMING_SNAKE_CASE ) assert result == 5 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 3, """test_dict""": {"""x""": 3, """y""": 5}} ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = """x = 0\nfor i in range(3):\n x = i""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = evaluate(__SCREAMING_SNAKE_CASE , {"""range""": range} , state=__SCREAMING_SNAKE_CASE ) assert result == 2 self.assertDictEqual(__SCREAMING_SNAKE_CASE , {"""x""": 2, """i""": 2} )
331
0
'''simple docstring''' from decimal import Decimal, getcontext from math import ceil, factorial def a__ ( a__ ): """simple docstring""" if not isinstance(lowercase__ , lowercase__ ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) __SCREAMING_SNAKE_CASE = precision __SCREAMING_SNAKE_CASE = ceil(precision / 14 ) __SCREAMING_SNAKE_CASE = 42_68_80 * Decimal(1_00_05 ).sqrt() __SCREAMING_SNAKE_CASE = 1 __SCREAMING_SNAKE_CASE = 13_59_14_09 __SCREAMING_SNAKE_CASE = Decimal(lowercase__ ) for k in range(1 , lowercase__ ): __SCREAMING_SNAKE_CASE = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase__ ) ** 3) linear_term += 5_45_14_01_34 exponential_term *= -26_25_37_41_26_40_76_80_00 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": UpperCAmelCase : str = 5_0 print(f"""The first {n} digits of pi is: {pi(n)}""")
356
'''simple docstring''' import os def a__ ( a__ = "input.txt" ): """simple docstring""" with open(os.path.join(os.path.dirname(a__ ) , a__ ) ) as input_file: __SCREAMING_SNAKE_CASE = [ [int(a__ ) for element in line.split(""",""" )] for line in input_file.readlines() ] __SCREAMING_SNAKE_CASE = len(a__ ) __SCREAMING_SNAKE_CASE = len(matrix[0] ) __SCREAMING_SNAKE_CASE = [[-1 for _ in range(a__ )] for _ in range(a__ )] for i in range(a__ ): __SCREAMING_SNAKE_CASE = matrix[i][0] for j in range(1 , a__ ): for i in range(a__ ): __SCREAMING_SNAKE_CASE = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , a__ ): __SCREAMING_SNAKE_CASE = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): __SCREAMING_SNAKE_CASE = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(f"""{solution() = }""")
331
0
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Any = { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json', } class lowerCAmelCase__ ( __lowerCamelCase ): """simple docstring""" lowerCAmelCase__ = 'xlnet' lowerCAmelCase__ = ['mems'] lowerCAmelCase__ = { 'n_token': 'vocab_size', # Backward compatibility 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str]=32_000 , __SCREAMING_SNAKE_CASE : Tuple=1_024 , __SCREAMING_SNAKE_CASE : int=24 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : str=4_096 , __SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]="bi" , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : List[str]=1E-12 , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Tuple=-1 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : str="last" , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : int="tanh" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : str=5 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : int=5 , __SCREAMING_SNAKE_CASE : List[str]=1 , __SCREAMING_SNAKE_CASE : Any=2 , **__SCREAMING_SNAKE_CASE : Optional[int] , ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = d_model __SCREAMING_SNAKE_CASE = n_layer __SCREAMING_SNAKE_CASE = n_head if d_model % n_head != 0: raise ValueError(f'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' ) __SCREAMING_SNAKE_CASE = d_model // n_head __SCREAMING_SNAKE_CASE = ff_activation __SCREAMING_SNAKE_CASE = d_inner __SCREAMING_SNAKE_CASE = untie_r __SCREAMING_SNAKE_CASE = attn_type __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = dropout __SCREAMING_SNAKE_CASE = mem_len __SCREAMING_SNAKE_CASE = reuse_len __SCREAMING_SNAKE_CASE = bi_data __SCREAMING_SNAKE_CASE = clamp_len __SCREAMING_SNAKE_CASE = same_length __SCREAMING_SNAKE_CASE = summary_type __SCREAMING_SNAKE_CASE = summary_use_proj __SCREAMING_SNAKE_CASE = summary_activation __SCREAMING_SNAKE_CASE = summary_last_dropout __SCREAMING_SNAKE_CASE = start_n_top __SCREAMING_SNAKE_CASE = end_n_top __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = pad_token_id __SCREAMING_SNAKE_CASE = eos_token_id if "use_cache" in kwargs: warnings.warn( """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`""" """ instead.""" , UpperCamelCase_ , ) __SCREAMING_SNAKE_CASE = kwargs["""use_cache"""] __SCREAMING_SNAKE_CASE = use_mems_eval __SCREAMING_SNAKE_CASE = use_mems_train super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , 
**UpperCamelCase_ ) @property def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' ) return -1 @max_position_embeddings.setter def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any: """simple docstring""" raise NotImplementedError( f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
357
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Any = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : Optional[Any] = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Dict = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Optional[Any] = sorted(arg_to_scheduler.keys()) UpperCAmelCase : str = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCAmelCase__ ( pl.LightningModule ): """simple docstring""" def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : argparse.Namespace , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict="base" , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Any: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = Path(self.hparams.output_dir ) __SCREAMING_SNAKE_CASE = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) else: __SCREAMING_SNAKE_CASE = config __SCREAMING_SNAKE_CASE = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): assert hasattr(self.config , __SCREAMING_SNAKE_CASE ), f'model config doesn\'t have a `{p}` attribute' setattr(self.config , __SCREAMING_SNAKE_CASE , getattr(self.hparams , __SCREAMING_SNAKE_CASE ) ) if tokenizer is None: __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else 
self.hparams.model_name_or_path , cache_dir=__SCREAMING_SNAKE_CASE , ) else: __SCREAMING_SNAKE_CASE = tokenizer __SCREAMING_SNAKE_CASE = MODEL_MODES[mode] if model is None: __SCREAMING_SNAKE_CASE = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__SCREAMING_SNAKE_CASE , ) else: __SCREAMING_SNAKE_CASE = model def UpperCAmelCase__ ( self : List[str] , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_type.from_pretrained(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = arg_to_scheduler[self.hparams.lr_scheduler] __SCREAMING_SNAKE_CASE = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __SCREAMING_SNAKE_CASE = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def UpperCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model __SCREAMING_SNAKE_CASE = ["""bias""", """LayerNorm.weight"""] __SCREAMING_SNAKE_CASE = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named parameters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: __SCREAMING_SNAKE_CASE = Adafactor( __SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , scale_parameter=__SCREAMING_SNAKE_CASE , relative_step=__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = AdamW( __SCREAMING_SNAKE_CASE , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __SCREAMING_SNAKE_CASE = optimizer __SCREAMING_SNAKE_CASE = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" return self.validation_step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict: """simple docstring""" return self.validation_end(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __SCREAMING_SNAKE_CASE = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]: """simple docstring""" if stage == "test": __SCREAMING_SNAKE_CASE = len(self.test_dataloader().dataset ) else: __SCREAMING_SNAKE_CASE = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(self.train_dataloader().dataset ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> int: """simple docstring""" raise NotImplementedError("""You must implement this for your task""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" return self.train_loader def UpperCAmelCase__ ( self : str ) -> 
Optional[Any]: """simple docstring""" return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Dict ) -> Union[str, Any]: """simple docstring""" return os.path.join( self.hparams.data_dir , """cached_{}_{}_{}""".format( __SCREAMING_SNAKE_CASE , list(filter(__SCREAMING_SNAKE_CASE , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict[str, Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.output_dir.joinpath("""best_tfmr""" ) __SCREAMING_SNAKE_CASE = self.step_count self.model.save_pretrained(__SCREAMING_SNAKE_CASE ) self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) @staticmethod def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ) -> int: """simple docstring""" parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--config_name""" , default="""""" , type=__SCREAMING_SNAKE_CASE , help="""Pretrained config name or path if not the same as model_name""" ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument( """--cache_dir""" , default=str(Path(__SCREAMING_SNAKE_CASE ).parent / """test_run""" / """cache""" ) , type=__SCREAMING_SNAKE_CASE , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , ) parser.add_argument( """--encoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--decoder_layerdrop""" , type=__SCREAMING_SNAKE_CASE , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--attention_dropout""" , type=__SCREAMING_SNAKE_CASE , help="""Attention dropout probability (Optional). 
Goes into model.config""" , ) parser.add_argument("""--learning_rate""" , default=5E-5 , type=__SCREAMING_SNAKE_CASE , help="""The initial learning rate for Adam.""" ) parser.add_argument( """--lr_scheduler""" , default="""linear""" , choices=__SCREAMING_SNAKE_CASE , metavar=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Learning rate scheduler""" , ) parser.add_argument("""--weight_decay""" , default=0.0 , type=__SCREAMING_SNAKE_CASE , help="""Weight decay if we apply some.""" ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=__SCREAMING_SNAKE_CASE , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--warmup_steps""" , default=0 , type=__SCREAMING_SNAKE_CASE , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--num_workers""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""kwarg passed to DataLoader""" ) parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=__SCREAMING_SNAKE_CASE ) parser.add_argument("""--train_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE ) parser.add_argument("""--eval_batch_size""" , default=32 , type=__SCREAMING_SNAKE_CASE ) parser.add_argument("""--adafactor""" , action="""store_true""" ) class lowerCAmelCase__ ( pl.Callback ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[Any]: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class lowerCAmelCase__ ( pl.Callback ): """simple docstring""" def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Any: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__SCREAMING_SNAKE_CASE ) class lowerCAmelCase__ ( pl.Callback ): """simple docstring""" def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = trainer.lr_schedulers[0]["""scheduler"""] __SCREAMING_SNAKE_CASE = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> List[Any]: """simple docstring""" rank_zero_info("""***** Validation results *****""" ) __SCREAMING_SNAKE_CASE = trainer.callback_metrics # Log results for key in sorted(__SCREAMING_SNAKE_CASE ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : pl.Trainer , __SCREAMING_SNAKE_CASE : pl.LightningModule ) -> str: """simple docstring""" rank_zero_info("""***** Test results *****""" ) __SCREAMING_SNAKE_CASE = trainer.callback_metrics # Log and save results to file __SCREAMING_SNAKE_CASE = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as writer: for key in sorted(__SCREAMING_SNAKE_CASE ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) ) writer.write("""{} = 
{}\n""".format(__SCREAMING_SNAKE_CASE , str(metrics[key] ) ) ) def a__ ( a__ , a__ ): """simple docstring""" parser.add_argument( """--output_dir""" , default=str(Path(a__ ).parent / """test_run""" / """model_checkpoints""" ) , type=a__ , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=a__ , default="""O2""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=a__ ) parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=a__ , help="""Max gradient norm""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" ) parser.add_argument( """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=a__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--seed""" , type=a__ , default=42 , help="""random seed for initialization""" ) parser.add_argument( """--data_dir""" , default=str(Path(a__ ).parent / """test_run""" / """dummy-train-data""" ) , type=a__ , help="""The input data dir. Should contain the training files for the CoNLL-2003 NER task.""" , ) def a__ ( a__ , a__ , a__=None , a__=True , a__=[] , a__=None , a__=None , **a__ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model __SCREAMING_SNAKE_CASE = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=a__ ) # add custom checkpoints if checkpoint_callback is None: __SCREAMING_SNAKE_CASE = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(a__ ) if logging_callback is None: __SCREAMING_SNAKE_CASE = LoggingCallback() __SCREAMING_SNAKE_CASE = {} if args.fpaa: __SCREAMING_SNAKE_CASE = 16 if args.gpus > 1: __SCREAMING_SNAKE_CASE = """auto""" __SCREAMING_SNAKE_CASE = """ddp""" __SCREAMING_SNAKE_CASE = args.accumulate_grad_batches __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = """auto""" __SCREAMING_SNAKE_CASE = pl.Trainer.from_argparse_args( a__ , weights_summary=a__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a__ , val_check_interval=1 , num_sanity_val_steps=2 , **a__ , ) if args.do_train: trainer.fit(a__ ) else: print("""RAG modeling tests with new set functions successfuly executed!""" ) return trainer
331
0
def a__ ( a__ = 1_00 ): """simple docstring""" __SCREAMING_SNAKE_CASE = (n * (n + 1) // 2) ** 2 __SCREAMING_SNAKE_CASE = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(f"""{solution() = }""")
358
'''simple docstring''' import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = (DDPMScheduler,) def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = { """num_train_timesteps""": 1_000, """beta_start""": 0.0001, """beta_end""": 0.02, """beta_schedule""": """linear""", """variance_type""": """fixed_small""", """clip_sample""": True, } config.update(**__SCREAMING_SNAKE_CASE ) return config def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__SCREAMING_SNAKE_CASE , beta_end=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" self.check_over_configs(thresholding=__SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__SCREAMING_SNAKE_CASE , prediction_type=__SCREAMING_SNAKE_CASE , sample_max_value=__SCREAMING_SNAKE_CASE , ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. 
predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 258.9606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" ) __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(__SCREAMING_SNAKE_CASE ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # 2. predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(__SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 202.0296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = scheduler.timesteps for i, timestep in enumerate(__SCREAMING_SNAKE_CASE ): if i == len(__SCREAMING_SNAKE_CASE ) - 1: __SCREAMING_SNAKE_CASE = -1 else: __SCREAMING_SNAKE_CASE = timesteps[i + 1] __SCREAMING_SNAKE_CASE = scheduler.previous_timestep(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = prev_t.item() self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0] with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] __SCREAMING_SNAKE_CASE = len(__SCREAMING_SNAKE_CASE ) with self.assertRaises(__SCREAMING_SNAKE_CASE , msg="""Can only pass one of 
`num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=__SCREAMING_SNAKE_CASE , timesteps=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps] with self.assertRaises( __SCREAMING_SNAKE_CASE , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=__SCREAMING_SNAKE_CASE )
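For readers skimming the test record above, a minimal sketch of the reverse-diffusion loop it exercises; the noise predictor here is a hypothetical stand-in, while the scheduler calls use diffusers' public `DDPMScheduler` API, mirroring the test's own loop.

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1_000, beta_schedule="linear")
sample = torch.randn(1, 3, 32, 32)  # stand-in latent/image tensor

for t in reversed(range(scheduler.config.num_train_timesteps)):
    # 1. predict noise residual (a trained model would go here; zeros are a placeholder)
    residual = torch.zeros_like(sample)
    # 2. step back from x_t to x_{t-1}
    sample = scheduler.step(residual, t, sample).prev_sample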
331
0
'''simple docstring'''
from torch import nn


def a__ ( act_fn ):
    """simple docstring"""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F'Unsupported activation function: {act_fn}' )
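A short usage sketch of the activation factory above (the helper keeps its obfuscated name `a__` in this record):

from torch import nn

act = a__("silu")
assert isinstance(act, nn.SiLU)
try:
    a__("tanh")
except ValueError as err:
    print(err)  # Unsupported activation function: tanh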
359
'''simple docstring''' from __future__ import annotations from sys import maxsize from typing import Generic, TypeVar UpperCAmelCase : Dict = TypeVar('T') def a__ ( a__ ): """simple docstring""" return (position - 1) // 2 def a__ ( a__ ): """simple docstring""" return (2 * position) + 1 def a__ ( a__ ): """simple docstring""" return (2 * position) + 2 class lowerCAmelCase__ ( Generic[T] ): """simple docstring""" def __init__( self : List[str] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = 0 def __len__( self : Optional[Any] ) -> int: """simple docstring""" return self.elements def __repr__( self : List[str] ) -> str: """simple docstring""" return str(self.heap ) def UpperCAmelCase__ ( self : Tuple ) -> bool: """simple docstring""" return self.elements == 0 def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" self.heap.append((elem, weight) ) __SCREAMING_SNAKE_CASE = self.elements self.elements += 1 self._bubble_up(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> T: """simple docstring""" if self.elements > 1: self._swap_nodes(0 , self.elements - 1 ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap.pop() del self.position_map[elem] self.elements -= 1 if self.elements > 0: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[0] self._bubble_down(__SCREAMING_SNAKE_CASE ) return elem def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.position_map[elem] __SCREAMING_SNAKE_CASE = (elem, weight) if position > 0: __SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position] if parent_weight > weight: self._bubble_up(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) else: self._bubble_down(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.position_map[elem] if curr_pos == 0: return None __SCREAMING_SNAKE_CASE = get_parent_position(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position] if parent_weight > weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_up(__SCREAMING_SNAKE_CASE ) return None def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : T ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.position_map[elem] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos] __SCREAMING_SNAKE_CASE = get_child_left_position(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = get_child_right_position(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements and child_right_position < self.elements: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position] if child_right_weight < child_left_weight and child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) if child_left_position < self.elements: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position] if child_left_weight < weight: 
self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) else: return None if child_right_position < self.elements: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position] if child_right_weight < weight: self._swap_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return self._bubble_down(__SCREAMING_SNAKE_CASE ) return None def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0] __SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ( self.heap[nodea_pos], self.heap[nodea_pos], ) __SCREAMING_SNAKE_CASE = nodea_pos __SCREAMING_SNAKE_CASE = nodea_pos class lowerCAmelCase__ ( Generic[T] ): """simple docstring""" def __init__( self : Union[str, Any] ) -> None: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = 0 def __repr__( self : Dict ) -> str: """simple docstring""" return str(self.connections ) def __len__( self : Dict ) -> int: """simple docstring""" return self.nodes def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : T ) -> None: """simple docstring""" if node not in self.connections: __SCREAMING_SNAKE_CASE = {} self.nodes += 1 def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : T , __SCREAMING_SNAKE_CASE : int ) -> None: """simple docstring""" self.add_node(__SCREAMING_SNAKE_CASE ) self.add_node(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = weight __SCREAMING_SNAKE_CASE = weight def a__ ( a__ , ): """simple docstring""" __SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections} __SCREAMING_SNAKE_CASE = {node: None for node in graph.connections} __SCREAMING_SNAKE_CASE = MinPriorityQueue() for node, weight in dist.items(): priority_queue.push(a__ , a__ ) if priority_queue.is_empty(): return dist, parent # initialization __SCREAMING_SNAKE_CASE = priority_queue.extract_min() __SCREAMING_SNAKE_CASE = 0 for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(a__ , dist[neighbour] ) __SCREAMING_SNAKE_CASE = node # running prim's algorithm while not priority_queue.is_empty(): __SCREAMING_SNAKE_CASE = priority_queue.extract_min() for neighbour in graph.connections[node]: if dist[neighbour] > dist[node] + graph.connections[node][neighbour]: __SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour] priority_queue.update_key(a__ , dist[neighbour] ) __SCREAMING_SNAKE_CASE = node return dist, parent
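The identifiers in the record above were renamed by the style transform, so it is not runnable as-is; a usage sketch assuming the original TheAlgorithms names `GraphUndirectedWeighted` and `prims_algo`:

graph = GraphUndirectedWeighted()  # assumed original class name
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("c", "a", 5)

dist, parent = prims_algo(graph)  # assumed original function name
# parent maps each node to its parent in the minimum spanning tree;
# with the weights above the MST keeps edges a-b (3) and a-c (5).
print(parent)  # e.g. {'a': None, 'b': 'a', 'c': 'a'}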
331
0
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase : str = logging.getLogger(__name__) def _snake_case ( a__ , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = False , ): """simple docstring""" __SCREAMING_SNAKE_CASE = bnb_quantization_config.load_in_abit __SCREAMING_SNAKE_CASE = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) __SCREAMING_SNAKE_CASE = [] # custom device map if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(device_map.keys() ) > 1: __SCREAMING_SNAKE_CASE = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: __SCREAMING_SNAKE_CASE = get_keys_to_not_convert(lowerCAmelCase__ ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(lowerCAmelCase__ ) # compatibility with peft __SCREAMING_SNAKE_CASE = load_in_abit __SCREAMING_SNAKE_CASE = load_in_abit __SCREAMING_SNAKE_CASE = get_parameter_device(lowerCAmelCase__ ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) __SCREAMING_SNAKE_CASE = replace_with_bnb_layers(lowerCAmelCase__ , lowerCAmelCase__ , modules_to_not_convert=lowerCAmelCase__ ) # convert param to the right dtype __SCREAMING_SNAKE_CASE = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: __SCREAMING_SNAKE_CASE = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(lowerCAmelCase__ ): param.to(lowerCAmelCase__ ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( F'The model device type is {model_device.type}. However, cuda is needed for quantization.' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): __SCREAMING_SNAKE_CASE = replace_with_bnb_layers( lowerCAmelCase__ , lowerCAmelCase__ , modules_to_not_convert=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = get_quantized_model_device_map( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , max_memory=lowerCAmelCase__ , no_split_module_classes=lowerCAmelCase__ , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase__ , offload_state_dict=lowerCAmelCase__ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(lowerCAmelCase__ , device_map=lowerCAmelCase__ , offload_dir=lowerCAmelCase__ ) def _snake_case ( a__ , a__ , a__=None , a__=None , a__=None ): """simple docstring""" if device_map is None: if torch.cuda.is_available(): __SCREAMING_SNAKE_CASE = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) __SCREAMING_SNAKE_CASE = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = special_dtypes __SCREAMING_SNAKE_CASE = no_split_module_classes __SCREAMING_SNAKE_CASE = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": __SCREAMING_SNAKE_CASE = get_balanced_memory( lowerCAmelCase__ , low_zero=(device_map == """balanced_low_0""") , max_memory=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE = max_memory __SCREAMING_SNAKE_CASE = infer_auto_device_map(lowerCAmelCase__ , **lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # check if don't have any quantized module on the cpu __SCREAMING_SNAKE_CASE = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules __SCREAMING_SNAKE_CASE = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def _snake_case ( a__ , a__ , a__=None , a__=None ): """simple docstring""" if modules_to_not_convert is None: __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _replace_with_bnb_layers( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _snake_case ( a__ , a__ , a__=None , a__=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE = False for name, module in model.named_children(): if current_key_name is None: __SCREAMING_SNAKE_CASE = [] current_key_name.append(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` __SCREAMING_SNAKE_CASE = """.""".join(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: __SCREAMING_SNAKE_CASE = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: __SCREAMING_SNAKE_CASE = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCAmelCase__ , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: __SCREAMING_SNAKE_CASE = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) __SCREAMING_SNAKE_CASE = module.weight.data if module.bias is not None: __SCREAMING_SNAKE_CASE = module.bias.data bnb_module.requires_grad_(lowerCAmelCase__ ) setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = True if len(list(module.children() ) ) > 0: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = _replace_with_bnb_layers( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( a__ ): """simple docstring""" with init_empty_weights(): __SCREAMING_SNAKE_CASE = deepcopy(lowerCAmelCase__ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` __SCREAMING_SNAKE_CASE = find_tied_parameters(lowerCAmelCase__ ) # For compatibility with Accelerate < 0.18 if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): __SCREAMING_SNAKE_CASE = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __SCREAMING_SNAKE_CASE = sum(lowerCAmelCase__ , [] ) __SCREAMING_SNAKE_CASE = len(lowerCAmelCase__ ) > 0 # Check if it is a base model __SCREAMING_SNAKE_CASE = False if hasattr(lowerCAmelCase__ , """base_model_prefix""" ): __SCREAMING_SNAKE_CASE = not hasattr(lowerCAmelCase__ , 
model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __SCREAMING_SNAKE_CASE = list(model.named_children() ) __SCREAMING_SNAKE_CASE = [list_modules[-1][0]] # add last module together with tied weights __SCREAMING_SNAKE_CASE = set(lowerCAmelCase__ ) - set(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = list(set(lowerCAmelCase__ ) ) + list(lowerCAmelCase__ ) # remove ".weight" from the keys __SCREAMING_SNAKE_CASE = [""".weight""", """.bias"""] __SCREAMING_SNAKE_CASE = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __SCREAMING_SNAKE_CASE = name.replace(lowerCAmelCase__ , """""" ) filtered_module_names.append(lowerCAmelCase__ ) return filtered_module_names def _snake_case ( a__ ): """simple docstring""" for m in model.modules(): if isinstance(lowerCAmelCase__ , bnb.nn.Linearabit ): return True return False def _snake_case ( a__ ): """simple docstring""" return next(parameter.parameters() ).device def _snake_case ( a__ , a__ , a__ , a__ , a__ , a__ , a__ ): """simple docstring""" if fpaa_statistics is None: set_module_tensor_to_device(lowerCAmelCase__ , lowerCAmelCase__ , 0 , dtype=lowerCAmelCase__ , value=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE = param_name __SCREAMING_SNAKE_CASE = model if "." in tensor_name: __SCREAMING_SNAKE_CASE = tensor_name.split(""".""" ) for split in splits[:-1]: __SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if new_module is None: raise ValueError(F'{module} has no attribute {split}.' ) __SCREAMING_SNAKE_CASE = new_module __SCREAMING_SNAKE_CASE = splits[-1] # offload weights __SCREAMING_SNAKE_CASE = False offload_weight(module._parameters[tensor_name] , lowerCAmelCase__ , lowerCAmelCase__ , index=lowerCAmelCase__ ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , lowerCAmelCase__ , index=lowerCAmelCase__ , ) else: offload_weight(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , index=lowerCAmelCase__ ) offload_weight(lowerCAmelCase__ , param_name.replace("""weight""" , """SCB""" ) , lowerCAmelCase__ , index=lowerCAmelCase__ ) set_module_tensor_to_device(lowerCAmelCase__ , lowerCAmelCase__ , """meta""" , dtype=lowerCAmelCase__ , value=torch.empty(*param.size() ) )
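A hedged sketch of how the quantization module above is typically driven through accelerate's public API; it assumes `BnbQuantizationConfig` and `load_and_quantize_model` as exposed by accelerate, and the checkpoint path is hypothetical.

import torch
from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
from transformers import AutoConfig, AutoModelForCausalLM

# instantiate the architecture on the meta device, as the module above recommends
with init_empty_weights():
    empty_model = AutoModelForCausalLM.from_config(AutoConfig.from_pretrained("gpt2"))

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
model = load_and_quantize_model(
    empty_model,
    bnb_quantization_config=bnb_config,
    weights_location="/path/to/checkpoint",  # hypothetical folder holding the weights
    device_map="auto",
)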
360
'''simple docstring'''
from __future__ import annotations

from cmath import sqrt


def quadratic_roots( a , b , c ):
    """simple docstring"""
    if a == 0:
        raise ValueError("""Coefficient 'a' must not be zero.""" )
    delta = b * b - 4 * a * c
    root_a = (-b + sqrt(delta )) / (2 * a)
    root_b = (-b - sqrt(delta )) / (2 * a)
    return (
        root_a.real if not root_a.imag else root_a,
        root_b.real if not root_b.imag else root_b,
    )


def main():
    """simple docstring"""
    solution_a , solution_b = quadratic_roots(a=5 , b=6 , c=1 )
    print(F'The solutions are: {solution_a} and {solution_b}' )


if __name__ == "__main__":
    main()
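Quick checks against the restored names above: the discriminant of 5x² + 6x + 1 is 6² − 4·5·1 = 16, so the roots are (−6 ± 4)/10; a negative discriminant yields a complex pair instead.

assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)  # real roots
assert quadratic_roots(a=1, b=0, c=1) == (1j, -1j)     # complex pair, x^2 + 1 = 0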
331
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : str = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
361
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


UpperCAmelCase : List[Any] = logging.get_logger(__name__)

# TODO: upload to AWS
UpperCAmelCase : Optional[int] = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}


class lowerCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""

    lowerCAmelCase__ = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
331
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=7 , __SCREAMING_SNAKE_CASE : Dict=3 , __SCREAMING_SNAKE_CASE : Union[str, Any]=30 , __SCREAMING_SNAKE_CASE : Optional[int]=400 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : Dict=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Tuple=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : str=1 / 255 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = num_channels __SCREAMING_SNAKE_CASE = min_resolution __SCREAMING_SNAKE_CASE = max_resolution __SCREAMING_SNAKE_CASE = do_resize __SCREAMING_SNAKE_CASE = size __SCREAMING_SNAKE_CASE = do_normalize __SCREAMING_SNAKE_CASE = image_mean __SCREAMING_SNAKE_CASE = image_std __SCREAMING_SNAKE_CASE = do_rescale __SCREAMING_SNAKE_CASE = rescale_factor __SCREAMING_SNAKE_CASE = do_pad def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=False ) -> List[str]: """simple docstring""" if not batched: __SCREAMING_SNAKE_CASE = image_inputs[0] if isinstance(__SCREAMING_SNAKE_CASE , Image.Image ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.size else: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = image.shape[1], image.shape[2] if w < h: __SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * h / w ) __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] elif w > h: __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] __SCREAMING_SNAKE_CASE = int(self.size["""shortest_edge"""] * w / h ) else: __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] __SCREAMING_SNAKE_CASE = self.size["""shortest_edge"""] else: __SCREAMING_SNAKE_CASE = [] for image in image_inputs: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[0] )[0] __SCREAMING_SNAKE_CASE = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCAmelCase__ ( __snake_case , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ConditionalDetrImageProcessor if is_vision_available() else None def UpperCAmelCase__ 
( self : Optional[int] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_mean""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """image_std""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_normalize""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """do_resize""" ) ) self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , """size""" ) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" pass def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = 
image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __SCREAMING_SNAKE_CASE = image_processing(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {"""image_id""": 39_769, """annotations""": target} # encode them __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" ) __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([5887.9600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __SCREAMING_SNAKE_CASE ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , 
__SCREAMING_SNAKE_CASE ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __SCREAMING_SNAKE_CASE ) ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __SCREAMING_SNAKE_CASE ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __SCREAMING_SNAKE_CASE ) ) @slow def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: __SCREAMING_SNAKE_CASE = json.loads(f.read() ) __SCREAMING_SNAKE_CASE = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} __SCREAMING_SNAKE_CASE = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them __SCREAMING_SNAKE_CASE = ConditionalDetrImageProcessor(format="""coco_panoptic""" ) __SCREAMING_SNAKE_CASE = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) # verify pixel values __SCREAMING_SNAKE_CASE = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __SCREAMING_SNAKE_CASE = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __SCREAMING_SNAKE_CASE ) ) # verify boxes __SCREAMING_SNAKE_CASE = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __SCREAMING_SNAKE_CASE = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __SCREAMING_SNAKE_CASE ) ) # verify is_crowd __SCREAMING_SNAKE_CASE = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __SCREAMING_SNAKE_CASE ) ) # verify class_labels __SCREAMING_SNAKE_CASE = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __SCREAMING_SNAKE_CASE ) ) # verify masks __SCREAMING_SNAKE_CASE = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __SCREAMING_SNAKE_CASE ) # verify orig_size __SCREAMING_SNAKE_CASE = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __SCREAMING_SNAKE_CASE ) ) # verify size __SCREAMING_SNAKE_CASE = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __SCREAMING_SNAKE_CASE ) )
362
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( a , a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = AltDiffusionPipeline lowerCAmelCase__ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_002 , ) __SCREAMING_SNAKE_CASE = CLIPTextModel(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" ) __SCREAMING_SNAKE_CASE = 77 __SCREAMING_SNAKE_CASE = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict=0 ) -> List[str]: """simple docstring""" if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, 
"""num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A photo of an astronaut""" __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE = self.get_dummy_components() __SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=__SCREAMING_SNAKE_CASE ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_002 , ) # TODO: remove after fixing the non-deterministic text encoder __SCREAMING_SNAKE_CASE = RobertaSeriesModelWithTransformation(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = text_encoder __SCREAMING_SNAKE_CASE = AltDiffusionPipeline(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe(**__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE = np.array( [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , safety_checker=__SCREAMING_SNAKE_CASE 
) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=20 , output_type="""np""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DDIMScheduler.from_pretrained("""BAAI/AltDiffusion""" , subfolder="""scheduler""" ) __SCREAMING_SNAKE_CASE = AltDiffusionPipeline.from_pretrained("""BAAI/AltDiffusion""" , scheduler=__SCREAMING_SNAKE_CASE , safety_checker=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = alt_pipe.to(__SCREAMING_SNAKE_CASE ) alt_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger""" __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE = alt_pipe([prompt] , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="""numpy""" ) __SCREAMING_SNAKE_CASE = output.images __SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
331
0
'''simple docstring'''


class lowerCAmelCase__ :
    """simple docstring"""

    def __init__( self , array : list[int] ) -> None:
        """simple docstring"""
        len_array = len(array )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum( self , start : int , end : int ) -> int:
        """simple docstring"""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum( self , target_sum : int ) -> bool:
        """simple docstring"""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
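A quick usage check of the prefix-sum class above (the class keeps its obfuscated name from this record):

ps = lowerCAmelCase__([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
assert ps.get_sum(0, 3) == 10         # 1 + 2 + 3 + 4
assert ps.get_sum(1, 2) == 5          # 2 + 3
assert ps.contains_sum(7) is True     # the contiguous subarray [3, 4]
assert ps.contains_sum(11) is False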
363
'''simple docstring''' import argparse import os import re import packaging.version UpperCAmelCase : Optional[int] = 'examples/' UpperCAmelCase : List[str] = { 'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'), 'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'), 'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'), 'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'), } UpperCAmelCase : Union[str, Any] = { 'init': 'src/diffusers/__init__.py', 'setup': 'setup.py', } UpperCAmelCase : Tuple = 'README.md' def a__ ( a__ , a__ , a__ ): """simple docstring""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE = replace.replace("""VERSION""" , a__ ) __SCREAMING_SNAKE_CASE = re_pattern.sub(a__ , a__ ) with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(a__ ) def a__ ( a__ ): """simple docstring""" for folder, directories, fnames in os.walk(a__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(a__ , a__ ) , a__ , pattern="""examples""" ) def a__ ( a__ , a__=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(a__ , a__ , a__ ) if not patch: update_version_in_examples(a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = """🤗 Transformers currently provides the following architectures""" __SCREAMING_SNAKE_CASE = """1. Want to contribute a new model?""" with open(a__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __SCREAMING_SNAKE_CASE = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __SCREAMING_SNAKE_CASE = lines[index].replace( """https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , ) index += 1 with open(a__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(a__ ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES["""init"""] , """r""" ) as f: __SCREAMING_SNAKE_CASE = f.read() __SCREAMING_SNAKE_CASE = REPLACE_PATTERNS["""init"""][0].search(a__ ).groups()[0] return packaging.version.parse(a__ ) def a__ ( a__=False ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}' else: __SCREAMING_SNAKE_CASE = F'{default_version.major}.{default_version.minor + 1}.0' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE = input(F'Which version are you releasing? 
[{default_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = default_version print(F'Updating version to {version}.' ) global_version_update(a__ , patch=a__ ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_version() __SCREAMING_SNAKE_CASE = F'{current_version.major}.{current_version.minor + 1}.0.dev0' __SCREAMING_SNAKE_CASE = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE = input(F'Which version are we developing now? [{dev_version}]' ) if len(a__ ) == 0: __SCREAMING_SNAKE_CASE = dev_version print(F'Updating version to {version}.' ) global_version_update(a__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.') parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.') UpperCAmelCase : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('Nothing to do after a patch :-)') else: post_release_work()
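A small illustration of how the `REPLACE_PATTERNS` machinery above rewrites a version line; this is a one-off check on a hypothetical input string, using the same regex and template as the script's "init" entry.

import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
replacement_template = '__version__ = "VERSION"\n'

source = '__version__ = "0.18.0.dev0"'
updated = pattern.sub(replacement_template.replace("VERSION", "0.18.0"), source)
print(updated)  # __version__ = "0.18.0"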
331
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCAmelCase : Union[str, Any] = { '''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ '''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTNeoForCausalLM''', '''GPTNeoForQuestionAnswering''', '''GPTNeoForSequenceClassification''', '''GPTNeoForTokenClassification''', '''GPTNeoModel''', '''GPTNeoPreTrainedModel''', '''load_tf_weights_in_gpt_neo''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = [ '''FlaxGPTNeoForCausalLM''', '''FlaxGPTNeoModel''', '''FlaxGPTNeoPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
364
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=99 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Optional[int]=5 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=36 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Any=0.0 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Any=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Dict=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=None , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" return MraConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = MraModel(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , 
__SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = MraForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_choices __SCREAMING_SNAKE_CASE = MraForMultipleChoice(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 
).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = () def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = MraModel.from_pretrained(__SCREAMING_SNAKE_CASE ) 
self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""MRA does not output attentions""" ) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" return @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" ) __SCREAMING_SNAKE_CASE = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" ) __SCREAMING_SNAKE_CASE = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_265 __SCREAMING_SNAKE_CASE = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
0
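The GPT-Neo init in the row above leans on transformers' internal `_LazyModule`. As a hedged illustration of the idea only, not that class's real implementation, a generic lazy module can be sketched like this:

import importlib
import types


class LazyModule(types.ModuleType):
    """Illustrative stand-in: submodules are imported on first attribute
    access instead of at package import time."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported object name back to the submodule that defines it.
        self._object_to_module = {
            obj: module for module, objects in import_structure.items() for obj in objects
        }

    def __getattr__(self, name):
        if name in self._object_to_module:
            module = importlib.import_module(self._object_to_module[name])
            return getattr(module, name)
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")


# Nothing under "json" is imported until the attribute is actually touched.
lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))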
'''simple docstring'''

def min_path_sum(grid):
    """Return the cheapest top-left to bottom-right path cost in ``grid``.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row, row_above):
    """Accumulate the minimal reachable cost into ``current_row`` in place."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
365
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast UpperCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__) @dataclass class lowerCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase__ = 10000 lowerCAmelCase__ = None lowerCAmelCase__ = None class lowerCAmelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase__ = ParquetConfig def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(f'At least one data file must be specified, but got data_files={self.config.data_files}' ) __SCREAMING_SNAKE_CASE = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple) ): __SCREAMING_SNAKE_CASE = data_files if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] __SCREAMING_SNAKE_CASE = [] for split_name, files in data_files.items(): if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = datasets.Features.from_arrow_schema(pq.read_schema(__SCREAMING_SNAKE_CASE ) ) break splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={"""files""": files} ) ) return splits def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : pa.Table ) -> pa.Table: """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE = table_cast(__SCREAMING_SNAKE_CASE , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( f'Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'' ) for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE ) ): with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: __SCREAMING_SNAKE_CASE = pq.ParquetFile(__SCREAMING_SNAKE_CASE ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE = 
pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f'{file_idx}_{batch_idx}', self._cast_table(__SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(f'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE )}: {e}' ) raise
331
0
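The Parquet builder in the row above streams record batches instead of reading whole files. A small self-contained sketch of that pattern with pyarrow (the file name is invented for the demo):

import pyarrow as pa
import pyarrow.parquet as pq

# Write a tiny Parquet file, then stream it back one record batch at a time,
# wrapping each batch in an Arrow table the way the builder does.
table = pa.table({"id": [1, 2, 3, 4], "text": ["a", "b", "c", "d"]})
pq.write_table(table, "demo.parquet")

parquet_file = pq.ParquetFile("demo.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=2)):
    pa_table = pa.Table.from_batches([record_batch])
    print(batch_idx, pa_table.num_rows)  # two batches, 2 rows each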
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = [] def parse_line(a__ ): for line in fp: if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __SCREAMING_SNAKE_CASE = line.decode("""UTF-8""" ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(""" """ ): # process a single warning and move it to `selected_warnings`. if len(SCREAMING_SNAKE_CASE_ ) > 0: __SCREAMING_SNAKE_CASE = """\n""".join(SCREAMING_SNAKE_CASE_ ) # Only keep the warnings specified in `targets` if any(F': {x}: ' in warning for x in targets ): selected_warnings.add(SCREAMING_SNAKE_CASE_ ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE = line.strip() buffer.append(SCREAMING_SNAKE_CASE_ ) if from_gh: for filename in os.listdir(SCREAMING_SNAKE_CASE_ ): __SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename != "warnings.txt": continue with open(SCREAMING_SNAKE_CASE_ ) as fp: parse_line(SCREAMING_SNAKE_CASE_ ) else: try: with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z: for filename in z.namelist(): if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): # read the file if filename != "warnings.txt": continue with z.open(SCREAMING_SNAKE_CASE_ ) as fp: parse_line(SCREAMING_SNAKE_CASE_ ) except Exception: logger.warning( F'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = set() __SCREAMING_SNAKE_CASE = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if (p.endswith(""".zip""" ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) return selected_warnings if __name__ == "__main__": def a__ ( a__ ): """simple docstring""" return values.split(""",""" ) UpperCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') # optional parameters parser.add_argument( '--targets', default='DeprecationWarning,UserWarning,FutureWarning', type=list_str, help='Comma-separated list of target warning(s) which we want to extract.', ) parser.add_argument( '--from_gh', action='store_true', help='If running from a GitHub action workflow and collecting warnings from its artifacts.', ) UpperCAmelCase : Tuple = parser.parse_args() UpperCAmelCase : int = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links UpperCAmelCase : Any = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download 
artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('=' * 8_0) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts UpperCAmelCase : Dict = extract_warnings(args.output_dir, args.targets) UpperCAmelCase : Union[str, Any] = sorted(selected_warnings) with open(os.path.join(args.output_dir, 'selected_warnings.json'), 'w', encoding='UTF-8') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
366
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase : Any = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] UpperCAmelCase : Optional[Any] = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] UpperCAmelCase : Optional[int] = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[str] = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) UpperCAmelCase : List[Any] = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def a__ ( a__ , a__ ): """simple docstring""" for tf_name, hf_name in patterns: __SCREAMING_SNAKE_CASE = k.replace(a__ , a__ ) return k def a__ ( a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = BigBirdPegasusConfig(**a__ ) __SCREAMING_SNAKE_CASE = BigBirdPegasusForConditionalGeneration(a__ ) __SCREAMING_SNAKE_CASE = torch_model.state_dict() __SCREAMING_SNAKE_CASE = {} # separating decoder weights __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} __SCREAMING_SNAKE_CASE = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = DECODER_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): __SCREAMING_SNAKE_CASE = [k.endswith(a__ ) for ending in KEYS_TO_IGNORE] if any(a__ ): continue __SCREAMING_SNAKE_CASE = REMAINING_PATTERNS __SCREAMING_SNAKE_CASE = rename_state_dict_key(a__ , a__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): __SCREAMING_SNAKE_CASE = v.T __SCREAMING_SNAKE_CASE = torch.from_numpy(a__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __SCREAMING_SNAKE_CASE = mapping["""model.embed_positions.weight"""] __SCREAMING_SNAKE_CASE = mapping.pop("""model.embed_positions.weight""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch_model.load_state_dict(a__ , strict=a__ ) __SCREAMING_SNAKE_CASE = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = tf.train.list_variables(a__ ) __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = ["""global_step"""] for name, shape in tqdm(a__ , desc="""converting tf checkpoint to dict""" ): __SCREAMING_SNAKE_CASE = any(pat in name for pat in ignore_name ) if skip_key: continue __SCREAMING_SNAKE_CASE = tf.train.load_variable(a__ , a__ ) __SCREAMING_SNAKE_CASE = array return tf_weights def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = get_tf_weights_as_numpy(a__ ) __SCREAMING_SNAKE_CASE = convert_bigbird_pegasus(a__ , a__ ) torch_model.save_pretrained(a__ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase : int = parser.parse_args() UpperCAmelCase : Dict = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
331
0
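The BigBird-Pegasus converter above renames TF checkpoint keys by applying (old, new) string pairs in order. A minimal check of that mechanism, using a subset of the sample's own pattern tables:

patterns = [
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("pegasus", "model"),
]


def rename_key(key, patterns):
    # Each pair is applied sequentially, so earlier replacements can feed later ones.
    for old, new in patterns:
        key = key.replace(old, new)
    return key


assert rename_key("pegasus/decoder/layer_0/kernel", patterns) == "model.decoder.layers.0.weight"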
'''simple docstring'''

import argparse
import logging
import pickle
from collections import Counter

logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Materialize a dense list indexed by token id.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
367
'''simple docstring''' from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a ) class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Optional[Any] , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : str ) -> Any: """simple docstring""" super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Any=None ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} __SCREAMING_SNAKE_CASE = {} if prompt is not None: __SCREAMING_SNAKE_CASE = prompt if generate_kwargs is not None: __SCREAMING_SNAKE_CASE = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __SCREAMING_SNAKE_CASE = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,""" """ please use only one""" ) __SCREAMING_SNAKE_CASE = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : int , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> int: """simple docstring""" return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = load_image(__SCREAMING_SNAKE_CASE ) if prompt is not None: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise ValueError( f'Received an invalid text input, got - {type(__SCREAMING_SNAKE_CASE )} - but expected a single string. 
' """Note also that one single text can be provided for conditional image to text generation.""" ) __SCREAMING_SNAKE_CASE = self.model.config.model_type if model_type == "git": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids __SCREAMING_SNAKE_CASE = [self.tokenizer.cls_token_id] + input_ids __SCREAMING_SNAKE_CASE = torch.tensor(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) model_inputs.update({"""input_ids""": input_ids} ) elif model_type == "pix2struct": __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , header_text=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __SCREAMING_SNAKE_CASE = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) model_inputs.update(__SCREAMING_SNAKE_CASE ) else: raise ValueError(f'Model type {model_type} does not support conditional text generation' ) else: __SCREAMING_SNAKE_CASE = self.image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: __SCREAMING_SNAKE_CASE = None return model_inputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any]=None ) -> List[str]: """simple docstring""" if ( "input_ids" in model_inputs and isinstance(model_inputs["""input_ids"""] , __SCREAMING_SNAKE_CASE ) and all(x is None for x in model_inputs["""input_ids"""] ) ): __SCREAMING_SNAKE_CASE = None if generate_kwargs is None: __SCREAMING_SNAKE_CASE = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __SCREAMING_SNAKE_CASE = model_inputs.pop(self.model.main_input_name ) __SCREAMING_SNAKE_CASE = self.model.generate(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return model_outputs def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [] for output_ids in model_outputs: __SCREAMING_SNAKE_CASE = { """generated_text""": self.tokenizer.decode( __SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , ) } records.append(__SCREAMING_SNAKE_CASE ) return records
331
0
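The image-to-text pipeline in the row above is normally reached through `transformers.pipeline`. A hedged usage sketch follows; the checkpoint name and image URL are illustrative only, and running it downloads the model:

from transformers import pipeline

# Build an image-to-text pipeline and caption a single image by URL.
captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))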
'''simple docstring'''

import warnings

from ..trainer import Trainer
from ..utils import logging

logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    """Deprecated thin wrapper around ``Trainer``; kept only for backward compatibility."""

    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
368
'''simple docstring'''

def pancake_sort(arr):
    """Sort ``arr`` in place-style by repeatedly flipping prefixes (pancake sort)."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in the unsorted prefix arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, moving the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
331
0
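The SageMaker trainer in the row above is a deprecation shim. The pattern it follows can be demonstrated in isolation; the class names below are hypothetical, chosen only to illustrate the shape:

import warnings


class BaseTool:
    def __init__(self, name=None):
        self.name = name


class LegacyTool(BaseTool):
    # The subclass exists only to emit a FutureWarning before deferring to the
    # maintained base class.
    def __init__(self, name=None, **kwargs):
        warnings.warn(
            "`LegacyTool` is deprecated, use `BaseTool` instead.",
            FutureWarning,
        )
        super().__init__(name=name, **kwargs)


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    LegacyTool(name="demo")
assert any(issubclass(w.category, FutureWarning) for w in caught)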
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = (DDPMParallelScheduler,) def UpperCAmelCase__ ( self : str , **__SCREAMING_SNAKE_CASE : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = { "num_train_timesteps": 1_000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**_a ) return config def UpperCAmelCase__ ( self : Tuple ) -> Any: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=_a ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_a , beta_end=_a ) def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_a ) def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=_a ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_a ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" self.check_over_configs(thresholding=_a ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=_a , prediction_type=_a , sample_max_value=_a , ) def UpperCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=_a ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=_a ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = len(_a ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = self.dummy_sample_deter + 0.1 __SCREAMING_SNAKE_CASE = self.dummy_sample_deter - 0.1 __SCREAMING_SNAKE_CASE = samplea.shape[0] __SCREAMING_SNAKE_CASE = torch.stack([samplea, samplea, samplea] , dim=0 ) __SCREAMING_SNAKE_CASE = torch.arange(_a )[0:3, None].repeat(1 , _a ) __SCREAMING_SNAKE_CASE = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) __SCREAMING_SNAKE_CASE = scheduler.batch_step_no_noise(_a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 1_153.1_833 ) < 1E-2 assert abs(result_mean.item() - 0.5005 ) < 
1E-3 def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = len(_a ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(_a ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(_a , _a ) # 2. predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(_a , _a , _a , generator=_a ).prev_sample __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3372 ) < 1E-3 def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" ) __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = len(_a ) __SCREAMING_SNAKE_CASE = self.dummy_model() __SCREAMING_SNAKE_CASE = self.dummy_sample_deter __SCREAMING_SNAKE_CASE = torch.manual_seed(0 ) for t in reversed(range(_a ) ): # 1. predict noise residual __SCREAMING_SNAKE_CASE = model(_a , _a ) # 2. predict previous mean of sample x_t-1 __SCREAMING_SNAKE_CASE = scheduler.step(_a , _a , _a , generator=_a ).prev_sample __SCREAMING_SNAKE_CASE = pred_prev_sample __SCREAMING_SNAKE_CASE = torch.sum(torch.abs(_a ) ) __SCREAMING_SNAKE_CASE = torch.mean(torch.abs(_a ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2631 ) < 1E-3 def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=_a ) __SCREAMING_SNAKE_CASE = scheduler.timesteps for i, timestep in enumerate(_a ): if i == len(_a ) - 1: __SCREAMING_SNAKE_CASE = -1 else: __SCREAMING_SNAKE_CASE = timesteps[i + 1] __SCREAMING_SNAKE_CASE = scheduler.previous_timestep(_a ) __SCREAMING_SNAKE_CASE = prev_t.item() self.assertEqual(_a , _a ) def UpperCAmelCase__ ( self : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0] with self.assertRaises(_a , msg="""`custom_timesteps` must be in descending order.""" ): scheduler.set_timesteps(timesteps=_a ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0] __SCREAMING_SNAKE_CASE = len(_a ) with self.assertRaises(_a , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ): scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a ) def UpperCAmelCase__ ( self : Optional[int] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.scheduler_classes[0] __SCREAMING_SNAKE_CASE = self.get_scheduler_config() __SCREAMING_SNAKE_CASE = scheduler_class(**_a ) __SCREAMING_SNAKE_CASE = 
[scheduler.config.num_train_timesteps] with self.assertRaises( _a , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ): scheduler.set_timesteps(timesteps=_a )
369
'''simple docstring'''

import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word for word in [sum(ord(x) - 64 for x in word) for word in words] if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
331
0
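A worked check of the triangular-word arithmetic used in the row above: "SKY" scores 19 + 11 + 25 = 55, which equals the tenth triangular number, 10 * 11 / 2.

# Self-contained verification of the word-value arithmetic.
TRIANGULAR_NUMBERS = [n * (n + 1) // 2 for n in range(1, 101)]


def word_value(word):
    # 'A' -> 1, 'B' -> 2, ... for uppercase ASCII letters.
    return sum(ord(ch) - 64 for ch in word)


assert word_value("SKY") == 55
assert word_value("SKY") in TRIANGULAR_NUMBERS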
'''simple docstring''' import argparse import glob import logging import os from argparse import Namespace from importlib import import_module import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch.nn import CrossEntropyLoss from torch.utils.data import DataLoader, TensorDataset from utils_ner import TokenClassificationTask UpperCAmelCase : Optional[int] = logging.getLogger(__name__) class lowerCAmelCase__ ( _a ): """simple docstring""" lowerCAmelCase__ = "token-classification" def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Dict ) -> List[str]: """simple docstring""" if type(_a ) == dict: __SCREAMING_SNAKE_CASE = Namespace(**_a ) __SCREAMING_SNAKE_CASE = import_module("""tasks""" ) try: __SCREAMING_SNAKE_CASE = getattr(_a , hparams.task_type ) __SCREAMING_SNAKE_CASE = token_classification_task_clazz() except AttributeError: raise ValueError( f'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. ' f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' ) __SCREAMING_SNAKE_CASE = self.token_classification_task.get_labels(hparams.labels ) __SCREAMING_SNAKE_CASE = CrossEntropyLoss().ignore_index super().__init__(_a , len(self.labels ) , self.mode ) def UpperCAmelCase__ ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Dict: """simple docstring""" return self.model(**_a ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": __SCREAMING_SNAKE_CASE = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids __SCREAMING_SNAKE_CASE = self(**_a ) __SCREAMING_SNAKE_CASE = outputs[0] # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]} return {"loss": loss} def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE = self.hparams for mode in ["train", "dev", "test"]: __SCREAMING_SNAKE_CASE = self._feature_file(_a ) if os.path.exists(_a ) and not args.overwrite_cache: logger.info("""Loading features from cached file %s""" , _a ) __SCREAMING_SNAKE_CASE = torch.load(_a ) else: logger.info("""Creating features from dataset file at %s""" , args.data_dir ) __SCREAMING_SNAKE_CASE = self.token_classification_task.read_examples_from_file(args.data_dir , _a ) __SCREAMING_SNAKE_CASE = self.token_classification_task.convert_examples_to_features( _a , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_a , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , ) logger.info("""Saving features into cached file %s""" , _a ) torch.save(_a , _a ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : bool = False ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self._feature_file(_a ) 
logger.info("""Loading features from cached file %s""" , _a ) __SCREAMING_SNAKE_CASE = torch.load(_a ) __SCREAMING_SNAKE_CASE = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) __SCREAMING_SNAKE_CASE = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) if features[0].token_type_ids is not None: __SCREAMING_SNAKE_CASE = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) else: __SCREAMING_SNAKE_CASE = torch.tensor([0 for f in features] , dtype=torch.long ) # HACK(we will not use this anymore soon) __SCREAMING_SNAKE_CASE = torch.tensor([f.label_ids for f in features] , dtype=torch.long ) return DataLoader( TensorDataset(_a , _a , _a , _a ) , batch_size=_a ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]: """simple docstring""" """Compute validation""" "" __SCREAMING_SNAKE_CASE = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type != "distilbert": __SCREAMING_SNAKE_CASE = ( batch[2] if self.config.model_type in ["bert", "xlnet"] else None ) # XLM and RoBERTa don"t use token_type_ids __SCREAMING_SNAKE_CASE = self(**_a ) __SCREAMING_SNAKE_CASE = outputs[:2] __SCREAMING_SNAKE_CASE = logits.detach().cpu().numpy() __SCREAMING_SNAKE_CASE = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = torch.stack([x["""val_loss"""] for x in outputs] ).mean() __SCREAMING_SNAKE_CASE = np.concatenate([x["""pred"""] for x in outputs] , axis=0 ) __SCREAMING_SNAKE_CASE = np.argmax(_a , axis=2 ) __SCREAMING_SNAKE_CASE = np.concatenate([x["""target"""] for x in outputs] , axis=0 ) __SCREAMING_SNAKE_CASE = dict(enumerate(self.labels ) ) __SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )] __SCREAMING_SNAKE_CASE = [[] for _ in range(out_label_ids.shape[0] )] for i in range(out_label_ids.shape[0] ): for j in range(out_label_ids.shape[1] ): if out_label_ids[i, j] != self.pad_token_label_id: out_label_list[i].append(label_map[out_label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) __SCREAMING_SNAKE_CASE = { "val_loss": val_loss_mean, "accuracy_score": accuracy_score(_a , _a ), "precision": precision_score(_a , _a ), "recall": recall_score(_a , _a ), "f1": fa_score(_a , _a ), } __SCREAMING_SNAKE_CASE = dict(results.items() ) __SCREAMING_SNAKE_CASE = results return ret, preds_list, out_label_list def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self._eval_end(_a ) __SCREAMING_SNAKE_CASE = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self._eval_end(_a ) # Converting to the dict required by pl # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\ # pytorch_lightning/trainer/logging.py#L139 __SCREAMING_SNAKE_CASE = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple: """simple 
docstring""" BaseTransformer.add_model_specific_args(_a , _a ) parser.add_argument( """--task_type""" , default="""NER""" , type=_a , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" ) parser.add_argument( """--max_seq_length""" , default=128 , type=_a , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--labels""" , default="""""" , type=_a , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , ) parser.add_argument( """--gpus""" , default=0 , type=_a , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , ) parser.add_argument( """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" ) return parser if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() add_generic_args(parser, os.getcwd()) UpperCAmelCase : List[Any] = NERTransformer.add_model_specific_args(parser, os.getcwd()) UpperCAmelCase : Union[str, Any] = parser.parse_args() UpperCAmelCase : Tuple = NERTransformer(args) UpperCAmelCase : Optional[Any] = generic_train(model, args) if args.do_predict: # See https://github.com/huggingface/transformers/issues/3159 # pl use this default format to create a checkpoint: # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\ # /pytorch_lightning/callbacks/model_checkpoint.py#L322 UpperCAmelCase : Dict = sorted(glob.glob(os.path.join(args.output_dir, 'checkpoint-epoch=*.ckpt'), recursive=True)) UpperCAmelCase : Optional[int] = model.load_from_checkpoint(checkpoints[-1]) trainer.test(model)
370
'''simple docstring'''

class Matrix:  # Public class to implement a graph
    """simple docstring"""

    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell may be visited if it lies inside the grid, is unvisited, and is land.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
331
0
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer UpperCAmelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase : Optional[Any] = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n >>> repo = \"openai/shap-e-img2img\"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"\n >>> image = load_image(image_url).convert(\"RGB\")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")\n ```\n' @dataclass class lowerCAmelCase__ ( _a ): """simple docstring""" lowerCAmelCase__ = 42 class lowerCAmelCase__ ( _a ): """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : PriorTransformer , __SCREAMING_SNAKE_CASE : CLIPVisionModel , __SCREAMING_SNAKE_CASE : CLIPImageProcessor , __SCREAMING_SNAKE_CASE : HeunDiscreteScheduler , __SCREAMING_SNAKE_CASE : ShapERenderer , ) -> Any: """simple docstring""" super().__init__() self.register_modules( prior=snake_case_ , image_encoder=snake_case_ , image_processor=snake_case_ , scheduler=snake_case_ , renderer=snake_case_ , ) def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]: """simple docstring""" if latents is None: __SCREAMING_SNAKE_CASE = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) __SCREAMING_SNAKE_CASE = latents.to(snake_case_ ) __SCREAMING_SNAKE_CASE = latents * scheduler.init_noise_sigma return latents def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str]=0 ) -> Optional[int]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __SCREAMING_SNAKE_CASE = torch.device(f'cuda:{gpu_id}' ) __SCREAMING_SNAKE_CASE = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case_ , snake_case_ ) @property def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(snake_case_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not 
None ): return torch.device(module._hf_hook.execution_device ) return self.device def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , ) -> int: """simple docstring""" if isinstance(snake_case_ , snake_case_ ) and isinstance(image[0] , torch.Tensor ): __SCREAMING_SNAKE_CASE = torch.cat(snake_case_ , axis=0 ) if image[0].ndim == 4 else torch.stack(snake_case_ , axis=0 ) if not isinstance(snake_case_ , torch.Tensor ): __SCREAMING_SNAKE_CASE = self.image_processor(snake_case_ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) __SCREAMING_SNAKE_CASE = image.to(dtype=self.image_encoder.dtype , device=snake_case_ ) __SCREAMING_SNAKE_CASE = self.image_encoder(snake_case_ )["""last_hidden_state"""] __SCREAMING_SNAKE_CASE = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 __SCREAMING_SNAKE_CASE = image_embeds.repeat_interleave(snake_case_ , dim=0 ) if do_classifier_free_guidance: __SCREAMING_SNAKE_CASE = torch.zeros_like(snake_case_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __SCREAMING_SNAKE_CASE = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(snake_case_ ) def __call__( self : Dict , __SCREAMING_SNAKE_CASE : Union[PIL.Image.Image, List[PIL.Image.Image]] , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : int = 25 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , __SCREAMING_SNAKE_CASE : float = 4.0 , __SCREAMING_SNAKE_CASE : int = 64 , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ) -> Tuple: """simple docstring""" if isinstance(snake_case_ , PIL.Image.Image ): __SCREAMING_SNAKE_CASE = 1 elif isinstance(snake_case_ , torch.Tensor ): __SCREAMING_SNAKE_CASE = image.shape[0] elif isinstance(snake_case_ , snake_case_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): __SCREAMING_SNAKE_CASE = len(snake_case_ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(snake_case_ )}' ) __SCREAMING_SNAKE_CASE = self._execution_device __SCREAMING_SNAKE_CASE = batch_size * num_images_per_prompt __SCREAMING_SNAKE_CASE = guidance_scale > 1.0 __SCREAMING_SNAKE_CASE = self._encode_image(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # prior self.scheduler.set_timesteps(snake_case_ , device=snake_case_ ) __SCREAMING_SNAKE_CASE = self.scheduler.timesteps __SCREAMING_SNAKE_CASE = self.prior.config.num_embeddings __SCREAMING_SNAKE_CASE = self.prior.config.embedding_dim __SCREAMING_SNAKE_CASE = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim __SCREAMING_SNAKE_CASE = latents.reshape(latents.shape[0] , snake_case_ , snake_case_ ) for i, t in enumerate(self.progress_bar(snake_case_ ) ): # expand the latents if we are doing classifier free guidance __SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __SCREAMING_SNAKE_CASE = 
self.scheduler.scale_model_input(snake_case_ , snake_case_ ) __SCREAMING_SNAKE_CASE = self.prior( snake_case_ , timestep=snake_case_ , proj_embedding=snake_case_ , ).predicted_image_embedding # remove the variance __SCREAMING_SNAKE_CASE = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance: __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 ) __SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) __SCREAMING_SNAKE_CASE = self.scheduler.step( snake_case_ , timestep=snake_case_ , sample=snake_case_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=snake_case_ ) __SCREAMING_SNAKE_CASE = [] for i, latent in enumerate(snake_case_ ): __SCREAMING_SNAKE_CASE = self.renderer.decode( latent[None, :] , snake_case_ , size=snake_case_ , ray_batch_size=4_096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(snake_case_ ) __SCREAMING_SNAKE_CASE = torch.stack(snake_case_ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported, not output_type={output_type}' ) __SCREAMING_SNAKE_CASE = images.cpu().numpy() if output_type == "pil": __SCREAMING_SNAKE_CASE = [self.numpy_to_pil(snake_case_ ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=snake_case_ )
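# Hedged illustration (added; not part of the pipeline): the guidance step
# above matches the standard classifier-free guidance formula. A standalone
# sketch with assumed names:
import torch

def guide(uncond: torch.Tensor, cond: torch.Tensor, scale: float) -> torch.Tensor:
    # scale == 1.0 reproduces the conditional prediction; scale > 1.0 pushes
    # the sample further toward the image conditioning.
    return uncond + scale * (cond - uncond)

assert torch.equal(guide(torch.zeros(2), torch.ones(2), 3.0), 3.0 * torch.ones(2))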
371
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : List[Any]=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Dict=5 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Tuple=37 , __SCREAMING_SNAKE_CASE : List[Any]="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Optional[Any]=16 , __SCREAMING_SNAKE_CASE : Optional[Any]=2 , __SCREAMING_SNAKE_CASE : Tuple=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=4 , ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_attention_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_choices def UpperCAmelCase__ ( self : Dict ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_attention_mask: __SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCAmelCase__ ( a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = True lowerCAmelCase__ = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerModelTester(self ) @slow def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" for model_class_name in self.all_model_classes: __SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_flax class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" ) __SCREAMING_SNAKE_CASE = jnp.array([[0, 1, 2, 3, 4, 5]] ) __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = 50_000 __SCREAMING_SNAKE_CASE = (1, 6, vocab_size) self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = jnp.array( [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
331
0
from __future__ import annotations def a__ ( a__ , a__ , a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = list(range(len(__lowerCAmelCase ) ) ) __SCREAMING_SNAKE_CASE = [v / w for v, w in zip(__lowerCAmelCase , __lowerCAmelCase )] index.sort(key=lambda a__ : ratio[a__] , reverse=True ) __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = [0] * len(__lowerCAmelCase ) for i in index: if weight[i] <= capacity: __SCREAMING_SNAKE_CASE = 1 max_value += value[i] capacity -= weight[i] else: __SCREAMING_SNAKE_CASE = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
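# Hedged usage sketch (added): the same greedy rule, inlined with fresh names
# because the obfuscation hides the original function name.
values, weights, capacity = [60, 100, 120], [10, 20, 30], 50
total = 0.0
for i in sorted(range(len(values)), key=lambda i: values[i] / weights[i], reverse=True):
    take = min(1.0, capacity / weights[i])  # whole item if it fits, else the remaining fraction
    total += take * values[i]
    capacity -= take * weights[i]
assert abs(total - 240.0) < 1e-9  # 60 + 100 + two thirds of 120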
350
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json', 'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json', } class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = "markuplm" def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple=30_522 , __SCREAMING_SNAKE_CASE : Optional[Any]=768 , __SCREAMING_SNAKE_CASE : str=12 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : str=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : str=0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Union[str, Any]=256 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_024 , __SCREAMING_SNAKE_CASE : Dict=216 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1_001 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : str=50 , __SCREAMING_SNAKE_CASE : int="absolute" , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple: """simple docstring""" super().__init__( pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = position_embedding_type __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = classifier_dropout # additional properties __SCREAMING_SNAKE_CASE = max_depth __SCREAMING_SNAKE_CASE = max_xpath_tag_unit_embeddings __SCREAMING_SNAKE_CASE = max_xpath_subs_unit_embeddings __SCREAMING_SNAKE_CASE = tag_pad_id __SCREAMING_SNAKE_CASE = subs_pad_id __SCREAMING_SNAKE_CASE = xpath_unit_hidden_size
331
0
'''simple docstring''' from maths.prime_factors import prime_factors def a__ ( a__ ): """simple docstring""" if not isinstance(_UpperCAmelCase , int ): __SCREAMING_SNAKE_CASE = F'Input value of [number={number}] must be an integer' raise TypeError(_UpperCAmelCase ) if number < 1: raise ValueError("""Input must be a positive integer""" ) return -1 if len(prime_factors(_UpperCAmelCase ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
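# Hedged worked examples (added): the function above returns -1 for an odd
# number of prime factors counted with multiplicity and +1 for an even
# number, i.e. a Liouville-style sign:
#     30 = 2 * 3 * 5      -> three factors -> -1
#     24 = 2 * 2 * 2 * 3  -> four factors  -> +1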
351
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[str] = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
331
0
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) # TODO: upload to AWS UpperCAmelCase : Optional[Any] = { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json""" ), } class lowerCAmelCase__ ( lowerCAmelCase_ ): """simple docstring""" lowerCAmelCase__ = """retribert""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=30_522 , __SCREAMING_SNAKE_CASE : List[str]=768 , __SCREAMING_SNAKE_CASE : Dict=8 , __SCREAMING_SNAKE_CASE : Dict=12 , __SCREAMING_SNAKE_CASE : List[str]=3_072 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : List[Any]=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : str=2 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Optional[int]=128 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = layer_norm_eps __SCREAMING_SNAKE_CASE = share_encoders __SCREAMING_SNAKE_CASE = projection_dim
352
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) self.assertTrue(isinstance(dc.token_ids , __SCREAMING_SNAKE_CASE ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__SCREAMING_SNAKE_CASE ): DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) # fails here def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is False and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(3 ) __SCREAMING_SNAKE_CASE = stepped is True and completed is True and reset is False self.assertTrue(__SCREAMING_SNAKE_CASE ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE = DisjunctiveConstraint(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
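# Editorial note (added): the tests above exercise the DisjunctiveConstraint
# protocol -- `update(token)` returns a (stepped, completed, reset) triple,
# where `stepped` means the token extended one of the candidate sequences,
# `completed` means a full candidate such as [1, 2, 4, 5] was matched, and
# `reset` means the token invalidated every partial match so progress starts
# over; `dc.reset()` forces the same fresh start manually.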
331
0
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt') UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ : """simple docstring""" lowerCAmelCase__ = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) lowerCAmelCase__ = field( default=snake_case_ , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) @dataclass class lowerCAmelCase__ : """simple docstring""" lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Evaluation language. 
Also train language if `train_language` is set to None."} ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Train language if it is different from the evaluation language."} ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) lowerCAmelCase__ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) lowerCAmelCase__ = field( default=snake_case_ , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_xnli""" , lowerCamelCase__ ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE = training_args.get_process_log_level() logger.setLevel(lowerCamelCase__ ) datasets.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.set_verbosity(lowerCamelCase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: __SCREAMING_SNAKE_CASE = load_dataset( """xnli""" , model_args.language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: __SCREAMING_SNAKE_CASE = load_dataset( """xnli""" , model_args.train_language , split="""train""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __SCREAMING_SNAKE_CASE = train_dataset.features["""label"""].names if training_args.do_eval: __SCREAMING_SNAKE_CASE = load_dataset( """xnli""" , model_args.language , split="""validation""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __SCREAMING_SNAKE_CASE = eval_dataset.features["""label"""].names if training_args.do_predict: __SCREAMING_SNAKE_CASE = load_dataset( """xnli""" , model_args.language , split="""test""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) __SCREAMING_SNAKE_CASE = predict_dataset.features["""label"""].names # Labels __SCREAMING_SNAKE_CASE = len(lowerCamelCase__ ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCamelCase__ , idalabel={str(lowerCamelCase__ ): label for i, label in enumerate(lowerCamelCase__ )} , labelaid={label: i for i, label in enumerate(lowerCamelCase__ )} , finetuning_task="""xnli""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __SCREAMING_SNAKE_CASE = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCamelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = """max_length""" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch __SCREAMING_SNAKE_CASE = False def preprocess_function(a__ ): # Tokenize the texts return tokenizer( examples["""premise"""] , examples["""hypothesis"""] , padding=lowerCamelCase__ , max_length=data_args.max_seq_length , truncation=lowerCamelCase__ , ) if training_args.do_train: if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE = min(len(lowerCamelCase__ ) , 
data_args.max_train_samples ) __SCREAMING_SNAKE_CASE = train_dataset.select(range(lowerCamelCase__ ) ) with training_args.main_process_first(desc="""train dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = train_dataset.map( lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on train dataset""" , ) # Log a few random samples from the training set: for index in random.sample(range(len(lowerCamelCase__ ) ) , 3 ): logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' ) if training_args.do_eval: if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE = min(len(lowerCamelCase__ ) , data_args.max_eval_samples ) __SCREAMING_SNAKE_CASE = eval_dataset.select(range(lowerCamelCase__ ) ) with training_args.main_process_first(desc="""validation dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = eval_dataset.map( lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on validation dataset""" , ) if training_args.do_predict: if data_args.max_predict_samples is not None: __SCREAMING_SNAKE_CASE = min(len(lowerCamelCase__ ) , data_args.max_predict_samples ) __SCREAMING_SNAKE_CASE = predict_dataset.select(range(lowerCamelCase__ ) ) with training_args.main_process_first(desc="""prediction dataset map pre-processing""" ): __SCREAMING_SNAKE_CASE = predict_dataset.map( lowerCamelCase__ , batched=lowerCamelCase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on prediction dataset""" , ) # Get the metric function __SCREAMING_SNAKE_CASE = evaluate.load("""xnli""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(a__ ): __SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , lowerCamelCase__ ) else p.predictions __SCREAMING_SNAKE_CASE = np.argmax(lowerCamelCase__ , axis=1 ) return metric.compute(predictions=lowerCamelCase__ , references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: __SCREAMING_SNAKE_CASE = default_data_collator elif training_args.fpaa: __SCREAMING_SNAKE_CASE = DataCollatorWithPadding(lowerCamelCase__ , pad_to_multiple_of=8 ) else: __SCREAMING_SNAKE_CASE = None # Initialize our Trainer __SCREAMING_SNAKE_CASE = Trainer( model=lowerCamelCase__ , args=lowerCamelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCamelCase__ , tokenizer=lowerCamelCase__ , data_collator=lowerCamelCase__ , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE = last_checkpoint __SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=lowerCamelCase__ ) __SCREAMING_SNAKE_CASE = train_result.metrics __SCREAMING_SNAKE_CASE = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCamelCase__ ) ) __SCREAMING_SNAKE_CASE = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("""train""" , lowerCamelCase__ ) trainer.save_metrics("""train""" , lowerCamelCase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=lowerCamelCase__ ) __SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCamelCase__ ) __SCREAMING_SNAKE_CASE = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.log_metrics("""eval""" , lowerCamelCase__ ) trainer.save_metrics("""eval""" , lowerCamelCase__ ) # Prediction if training_args.do_predict: logger.info("""*** Predict ***""" ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = trainer.predict(lowerCamelCase__ , metric_key_prefix="""predict""" ) __SCREAMING_SNAKE_CASE = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowerCamelCase__ ) ) __SCREAMING_SNAKE_CASE = min(lowerCamelCase__ , len(lowerCamelCase__ ) ) trainer.log_metrics("""predict""" , lowerCamelCase__ ) trainer.save_metrics("""predict""" , lowerCamelCase__ ) __SCREAMING_SNAKE_CASE = np.argmax(lowerCamelCase__ , axis=1 ) __SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predictions.txt""" ) if trainer.is_world_process_zero(): with open(lowerCamelCase__ , """w""" ) as writer: writer.write("""index\tprediction\n""" ) for index, item in enumerate(lowerCamelCase__ ): __SCREAMING_SNAKE_CASE = label_list[item] writer.write(F'{index}\t{item}\n' ) if __name__ == "__main__": main()
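# Hedged invocation sketch (added; the flag names follow the dataclass fields
# and the HuggingFace TrainingArguments used above, while the model id and
# paths are placeholders):
#
#     python run_xnli.py \
#         --model_name_or_path bert-base-multilingual-cased \
#         --language de --train_language en \
#         --do_train --do_eval \
#         --output_dir /tmp/xnli_out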
353
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ ( a ): """simple docstring""" def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Optional[Any]=7 , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : int=32 , __SCREAMING_SNAKE_CASE : Any=5 , __SCREAMING_SNAKE_CASE : Dict=4 , __SCREAMING_SNAKE_CASE : Optional[int]=37 , __SCREAMING_SNAKE_CASE : str="gelu" , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Optional[int]=0.1 , __SCREAMING_SNAKE_CASE : Tuple=512 , __SCREAMING_SNAKE_CASE : Tuple=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=0.02 , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]="None" , __SCREAMING_SNAKE_CASE : List[str]=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = parent __SCREAMING_SNAKE_CASE = batch_size __SCREAMING_SNAKE_CASE = seq_length __SCREAMING_SNAKE_CASE = is_training __SCREAMING_SNAKE_CASE = use_input_mask __SCREAMING_SNAKE_CASE = use_token_type_ids __SCREAMING_SNAKE_CASE = use_labels __SCREAMING_SNAKE_CASE = vocab_size __SCREAMING_SNAKE_CASE = hidden_size __SCREAMING_SNAKE_CASE = num_hidden_layers __SCREAMING_SNAKE_CASE = num_attention_heads __SCREAMING_SNAKE_CASE = intermediate_size __SCREAMING_SNAKE_CASE = hidden_act __SCREAMING_SNAKE_CASE = hidden_dropout_prob __SCREAMING_SNAKE_CASE = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE = max_position_embeddings __SCREAMING_SNAKE_CASE = type_vocab_size __SCREAMING_SNAKE_CASE = type_sequence_label_size __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = num_labels __SCREAMING_SNAKE_CASE = num_choices __SCREAMING_SNAKE_CASE = relative_attention __SCREAMING_SNAKE_CASE = position_biased_input __SCREAMING_SNAKE_CASE = pos_att_type __SCREAMING_SNAKE_CASE = scope def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE = None if self.use_input_mask: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) __SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None if self.use_labels: __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, 
self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.get_config() __SCREAMING_SNAKE_CASE = 300 return config def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )[0] __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForMaskedLM(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForSequenceClassification(__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) 
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = self.num_labels __SCREAMING_SNAKE_CASE = DebertaForTokenClassification(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) model.to(__SCREAMING_SNAKE_CASE ) model.eval() __SCREAMING_SNAKE_CASE = model( __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ( __SCREAMING_SNAKE_CASE ) , ) = config_and_inputs __SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( a , a , unittest.TestCase ): """simple docstring""" lowerCAmelCase__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = True lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModelTester(self ) __SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_deberta_model(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*__SCREAMING_SNAKE_CASE ) @slow def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) self.assertIsNotNone(__SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) __SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. __SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) , f'{output[:, 1:4, 1:4]}' )
331
0
'''simple docstring''' from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar UpperCAmelCase : Optional[Any] = TypeVar('T') class lowerCAmelCase__ ( Generic[T] ): """simple docstring""" lowerCAmelCase__ = 42 # Cache store of keys lowerCAmelCase__ = 42 # References of the keys in cache lowerCAmelCase__ = 10 # Maximum capacity of cache def __init__( self : Any , __SCREAMING_SNAKE_CASE : int ) -> int: """simple docstring""" __SCREAMING_SNAKE_CASE = deque() __SCREAMING_SNAKE_CASE = set() if not n: __SCREAMING_SNAKE_CASE = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: __SCREAMING_SNAKE_CASE = n def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : T ) -> Union[str, Any]: """simple docstring""" if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: __SCREAMING_SNAKE_CASE = self.dq_store.pop() self.key_reference.remove(_lowerCAmelCase ) else: self.dq_store.remove(_lowerCAmelCase ) self.dq_store.appendleft(_lowerCAmelCase ) self.key_reference.add(_lowerCAmelCase ) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" for k in self.dq_store: print(_lowerCAmelCase ) def __repr__( self : str ) -> Any: """simple docstring""" return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}' if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : Optional[Any] = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
354
'''simple docstring''' from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = analyze_text(a__ ) __SCREAMING_SNAKE_CASE = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. __SCREAMING_SNAKE_CASE = sum(single_char_strings.values() ) # one length string __SCREAMING_SNAKE_CASE = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: __SCREAMING_SNAKE_CASE = single_char_strings[ch] __SCREAMING_SNAKE_CASE = my_str / all_sum my_fir_sum += prob * math.loga(a__ ) # entropy formula. # print entropy print(F'{round(-1 * my_fir_sum ):.1f}' ) # two len string __SCREAMING_SNAKE_CASE = sum(two_char_strings.values() ) __SCREAMING_SNAKE_CASE = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for chb in my_alphas: __SCREAMING_SNAKE_CASE = cha + chb if sequence in two_char_strings: __SCREAMING_SNAKE_CASE = two_char_strings[sequence] __SCREAMING_SNAKE_CASE = int(a__ ) / all_sum my_sec_sum += prob * math.loga(a__ ) # print second entropy print(F'{round(-1 * my_sec_sum ):.1f}' ) # print the difference between them print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}' ) def a__ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE = Counter() # type: ignore __SCREAMING_SNAKE_CASE = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(a__ ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def a__ ( ): """simple docstring""" import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
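# Editorial note (added): the three figures printed above are the first-order
# entropy H1 = -sum_c p(c) * log2 p(c) over single characters, the joint
# entropy H2 over adjacent character pairs, and their difference H2 - H1,
# which approximates the conditional entropy of a character given its
# predecessor.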
331
0