column                   type    range
code                     string  lengths 87-55.2k
code_codestyle           int64   0-349
style_context            string  lengths 135-49.1k
style_context_codestyle  int64   0-349
label                    int64   0-1
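The five columns above describe one table of paired code samples: each row carries a style-transformed source file (`code`), a reference file (`style_context`), two integer style identifiers, and a binary label. A minimal sketch of loading and inspecting such a dump, assuming it is shipped as a Parquet shard; the file name below is a placeholder, not a path taken from this dump:

```python
import pandas as pd

# Load one shard of the dump (placeholder file name) and inspect a record.
df = pd.read_parquet("train-00000-of-00001.parquet")

row = df.iloc[0]
# The integer columns are small ids/flags; the string columns hold entire
# flattened Python files.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:120])
```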
"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase ): while b: UpperCAmelCase_ , UpperCAmelCase_ : Dict = b, a % b return a def __a ( __lowerCamelCase, __lowerCamelCase ): return a if b == 0 else euclidean_gcd_recursive(__lowerCamelCase, a % b ) def __a ( ): print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5 )}""" ) print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3 )}""" ) print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3 )}""" ) print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6 )}""" ) print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5 )}""" ) print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3 )}""" ) print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3 )}""" ) print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6 )}""" ) print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3 )}""" ) if __name__ == "__main__": main()
61
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """trocr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = activation_function UpperCamelCase = max_position_embeddings UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = init_std UpperCamelCase = decoder_layerdrop UpperCamelCase = use_cache UpperCamelCase = scale_embedding UpperCamelCase = use_learned_position_embeddings UpperCamelCase = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
343
0
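Taken together, the five values above (the GCD source string, 61, the TrOCR-config source string, 343, and 0) form one complete record. A minimal sketch of that same record as a Python mapping, with the long string fields truncated for readability:

```python
# First record of the dump, abbreviated. Both string fields hold entire
# flattened Python source files; they are truncated here for readability.
record = {
    "code": '"""simple docstring""" def __a ( __lowerCamelCase, __lowerCamelCase ): ...',
    "code_codestyle": 61,
    "style_context": "from ...configuration_utils import PretrainedConfig ...",
    "style_context_codestyle": 343,
    "label": 0,
}
```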
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =generate_pascal_triangle(SCREAMING_SNAKE_CASE__ ) for row_idx in range(SCREAMING_SNAKE_CASE__ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=' ' ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=' ' ) else: print(triangle[row_idx][col_idx] , end='' ) print() def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) __UpperCamelCase =[] for current_row_idx in range(SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =populate_current_row(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) triangle.append(SCREAMING_SNAKE_CASE__ ) return triangle def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int ): __UpperCamelCase =[-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 __UpperCamelCase , __UpperCamelCase =1, 1 for current_col_idx in range(1 , SCREAMING_SNAKE_CASE__ ): calculate_current_element( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return current_row def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , ): __UpperCamelCase =triangle[current_row_idx - 1][current_col_idx - 1] __UpperCamelCase =triangle[current_row_idx - 1][current_col_idx] __UpperCamelCase =above_to_left_elt + above_to_right_elt def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) __UpperCamelCase =[[1]] for row_index in range(1 , SCREAMING_SNAKE_CASE__ ): __UpperCamelCase =[0] + result[-1] + [0] __UpperCamelCase =row_index + 1 # Calculate the number of distinct elements in a row __UpperCamelCase =sum(divmod(SCREAMING_SNAKE_CASE__ , 2 ) ) __UpperCamelCase =[ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] __UpperCamelCase =row_first_half[: (row_index + 1) // 2] row_second_half.reverse() __UpperCamelCase =row_first_half + row_second_half result.append(SCREAMING_SNAKE_CASE__ ) return result def _UpperCAmelCase ( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(SCREAMING_SNAKE_CASE__ : Callable , SCREAMING_SNAKE_CASE__ : int ) -> None: __UpperCamelCase =F'{func.__name__}({value})' __UpperCamelCase =timeit(F'__main__.{call}' , setup='import __main__' ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'{call:38} -- {timing:.4f} seconds' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
62
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): __lowerCAmelCase = """swin""" __lowerCAmelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return 1E-4
343
0
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCAmelCase_ : Dict = logging.getLogger(__name__) def _lowerCamelCase ( ) -> Optional[int]: _a = argparse.ArgumentParser( description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." ) parser.add_argument( "--dataset_name" , type=lowercase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , ) parser.add_argument( "--dataset_config" , type=lowercase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." ) parser.add_argument( "--tokenizer_name_or_path" , type=lowercase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , ) parser.add_argument( "--shard_size" , type=lowercase , default=1000 , help="Number of entries to go in a single shard." , ) parser.add_argument("--split" , type=lowercase , default="train" , choices=["train", "test", "validation"] ) parser.add_argument( "--limit" , default=lowercase , type=lowercase , help="Limit the number of shards (used for debugging)." , ) parser.add_argument( "--max_length" , type=lowercase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum" " sequence length that is a multiple of 8." , ) parser.add_argument( "--output_dir" , default="tf-tpu" , type=lowercase , help="Output directory where the TFRecord shards will be saved. If the" " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord" " shards will be directly saved to a Google Cloud Storage bucket." , ) _a = parser.parse_args() return args def _lowerCamelCase ( lowercase : List[Any] ) -> Tuple: def fn(lowercase : Optional[Any] ): return tokenizer(examples["text"] ) return fn def _lowerCamelCase ( lowercase : List[Any] ) -> Dict: _a = [] for i in range(len(tokenized_data["input_ids"] ) ): _a = { "input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ), "attention_mask": tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ), } _a = tf.train.Features(feature=lowercase ) _a = tf.train.Example(features=lowercase ) _a = example.SerializeToString() records.append(lowercase ) return records def _lowerCamelCase ( lowercase : Dict ) -> str: _a = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _a = min(len(lowercase ) , args.limit ) _a = dataset.select(range(lowercase ) ) print(F'Limiting the dataset to {args.limit} entries.' ) _a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _a = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowercase ): os.makedirs(lowercase ) else: _a = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. _a = tokenize_function(lowercase ) _a = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=["text"] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. 
When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowercase : Tuple ): # Concatenate all texts. _a = {k: sum(examples[k] , [] ) for k in examples.keys()} _a = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _a = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _a = { k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )] for k, t in concatenated_examples.items() } return result _a = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 ) _a = 0 _a = 0 for shard in range(0 , len(lowercase ) , args.shard_size ): _a = grouped_dataset[shard : shard + args.shard_size] _a = len(dataset_snapshot["input_ids"] ) _a = os.path.join(lowercase , F'dataset-{shard_count}-{records_containing}.tfrecord' ) _a = get_serialized_examples(lowercase ) with tf.io.TFRecordWriter(lowercase ) as out_file: for i in range(len(lowercase ) ): _a = serialized_examples[i] out_file.write(lowercase ) print("Wrote file {} containing {} records".format(lowercase , lowercase ) ) shard_count += 1 total_records += records_containing with open(F'split-{args.split}-records-count.txt' , "w" ) as f: print(F'Total {args.split} records: {total_records}' , file=lowercase ) if __name__ == "__main__": lowerCAmelCase_ : Optional[Any] = parse_args() main(args)
63
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _SCREAMING_SNAKE_CASE = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) _SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) _SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(6_4, 6_4) ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image) _SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0) _SCREAMING_SNAKE_CASE = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _SCREAMING_SNAKE_CASE = """Normal""" if result[0][0] == 1: _SCREAMING_SNAKE_CASE = """Abnormality detected"""
343
0
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = ArgumentParser( description=( """PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def UpperCAmelCase__ (): """simple docstring""" _snake_case : Union[str, Any] = parse_args() # Import training_script as a module. _snake_case : Dict = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) _snake_case : int = script_fpath.stem _snake_case : int = importlib.import_module(snake_case__ ) # Patch sys.argv _snake_case : List[str] = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
64
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): pass class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None def __iter__( self : Optional[int] ): """simple docstring""" UpperCamelCase = self UpperCamelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase_ ) yield node.data UpperCamelCase = node.next_node @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": _SCREAMING_SNAKE_CASE = Node(1) _SCREAMING_SNAKE_CASE = Node(2) _SCREAMING_SNAKE_CASE = Node(3) _SCREAMING_SNAKE_CASE = Node(4) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = root_node.next_node print(root_node.has_loop) # True _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = Node(1) print(root_node.has_loop) # False
343
0
import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowerCAmelCase_ ( __A ) -> Dict: '''simple docstring''' UpperCAmelCase__ = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", "decoder.output_projection.weight", ] for k in ignore_keys: state_dict.pop(__A, __A ) def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ = emb.weight.shape UpperCAmelCase__ = nn.Linear(__A, __A, bias=__A ) UpperCAmelCase__ = emb.weight.data return lin_layer def lowerCAmelCase_ ( __A, __A="facebook/mbart-large-en-ro", __A=False, __A=False ) -> Tuple: '''simple docstring''' UpperCAmelCase__ = torch.load(__A, map_location="cpu" )["model"] remove_ignore_keys_(__A ) UpperCAmelCase__ = state_dict["encoder.embed_tokens.weight"].shape[0] UpperCAmelCase__ = MBartConfig.from_pretrained(__A, vocab_size=__A ) if mbart_aa and finetuned: UpperCAmelCase__ = "relu" UpperCAmelCase__ = state_dict["decoder.embed_tokens.weight"] UpperCAmelCase__ = MBartForConditionalGeneration(__A ) model.model.load_state_dict(__A ) if finetuned: UpperCAmelCase__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( 'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.' ) parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--hf_config', default='facebook/mbart-large-cc25', type=str, help='Which huggingface architecture to use: mbart-large', ) parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint') parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint') UpperCamelCase__ = parser.parse_args() UpperCamelCase__ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
65
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name 
).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
"""simple docstring""" from __future__ import annotations __a = tuple[int, int, int] __a = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase __a = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" # -------------------------- default selection -------------------------- # rotors -------------------------- __a = "EGZWVONAHDCLFQMSIPJBYUKXTR" __a = "FOBHMDKEXQNRAULPGSJVTYICZW" __a = "ZJXESIUQLHAVRMDOYGTNFWPBKC" # reflector -------------------------- __a = { "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C", "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F", "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I", "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L", "M": "Z", "Z": "M", } # -------------------------- extra rotors -------------------------- __a = "RMDJXFUWGISLHVTCQNKYPBEZOA" __a = "SGLCPQWZHKXAREONTFBVIYJUDM" __a = "HVSICLTYKQUBXDWAJZOMFGPREN" __a = "RZWQHFMVDBKICJLNTUXAGYPSOE" __a = "LFKIJODBEGAMQPXVUHYSTCZRWN" __a = "KOAEGVDHXPQZMLFTYWJNBRCIUS" def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' if (unique_rotsel := len(set(_lowercase ) )) < 3: snake_case_ :Any = f"""Please use 3 unique rotors (not {unique_rotsel})""" raise Exception(_lowercase ) # Checks if rotor positions are valid snake_case_, snake_case_, snake_case_ :int = rotpos if not 0 < rotorposa <= len(_lowercase ): snake_case_ :List[Any] = f"""First rotor position is not within range of 1..26 ({rotorposa}""" raise ValueError(_lowercase ) if not 0 < rotorposa <= len(_lowercase ): snake_case_ :Tuple = f"""Second rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(_lowercase ) if not 0 < rotorposa <= len(_lowercase ): snake_case_ :str = f"""Third rotor position is not within range of 1..26 ({rotorposa})""" raise ValueError(_lowercase ) # Validates string and returns dict snake_case_ :Optional[Any] = _plugboard(_lowercase ) return rotpos, rotsel, pbdict def A_ ( _lowercase ): '''simple docstring''' if not isinstance(_lowercase, _lowercase ): snake_case_ :int = f"""Plugboard setting isn't type string ({type(_lowercase )})""" raise TypeError(_lowercase ) elif len(_lowercase ) % 2 != 0: snake_case_ :List[Any] = f"""Odd number of symbols ({len(_lowercase )})""" raise Exception(_lowercase ) elif pbstring == "": return {} pbstring.replace(""" """, """""" ) # Checks if all characters are unique snake_case_ :List[str] = set() for i in pbstring: if i not in abc: snake_case_ :Dict = f"""'{i}' not in list of symbols""" raise Exception(_lowercase ) elif i in tmppbl: snake_case_ :Dict = f"""Duplicate symbol ({i})""" raise Exception(_lowercase ) else: tmppbl.add(_lowercase ) del tmppbl # Created the dictionary snake_case_ :int = {} for j in range(0, len(_lowercase ) - 1, 2 ): snake_case_ :Dict = pbstring[j + 1] snake_case_ :List[Any] = pbstring[j] return pb def A_ ( _lowercase, _lowercase, _lowercase = (rotora, rotora, rotora), _lowercase = "", ): '''simple docstring''' snake_case_ :Tuple = text.upper() snake_case_, snake_case_, snake_case_ :Tuple = _validator( _lowercase, _lowercase, plugb.upper() ) snake_case_, snake_case_, snake_case_ :int = rotor_position snake_case_, snake_case_, snake_case_ :Tuple = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 snake_case_ :int = [] # encryption/decryption process -------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: snake_case_ :Any = plugboard[symbol] # rotor ra -------------------------- snake_case_ 
:Optional[int] = abc.index(_lowercase ) + rotorposa snake_case_ :Any = rotora[index % len(_lowercase )] # rotor rb -------------------------- snake_case_ :List[Any] = abc.index(_lowercase ) + rotorposa snake_case_ :int = rotora[index % len(_lowercase )] # rotor rc -------------------------- snake_case_ :int = abc.index(_lowercase ) + rotorposa snake_case_ :List[Any] = rotora[index % len(_lowercase )] # reflector -------------------------- # this is the reason you don't need another machine to decipher snake_case_ :Union[str, Any] = reflector[symbol] # 2nd rotors snake_case_ :int = abc[rotora.index(_lowercase ) - rotorposa] snake_case_ :Dict = abc[rotora.index(_lowercase ) - rotorposa] snake_case_ :Union[str, Any] = abc[rotora.index(_lowercase ) - rotorposa] # 2nd plugboard if symbol in plugboard: snake_case_ :int = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(_lowercase ): snake_case_ :List[Any] = 0 rotorposa += 1 if rotorposa >= len(_lowercase ): snake_case_ :str = 0 rotorposa += 1 if rotorposa >= len(_lowercase ): snake_case_ :Union[str, Any] = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(_lowercase ) return "".join(_lowercase ) if __name__ == "__main__": __a = "This is my Python script that emulates the Enigma machine from WWII." __a = (1, 1, 1) __a = "pictures" __a = (rotora, rotora, rotora) __a = enigma(message, rotor_pos, rotor_sel, pb) print("Encrypted message:", en) print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
66
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
343
0
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase ={ "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =["VivitImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ "VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "VivitModel", "VivitPreTrainedModel", "VivitForVideoClassification", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
67
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ShapEPipeline __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCAmelCase = False @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return 8 @property def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase = PriorTransformer(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**lowerCamelCase_ ) return model def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , ) UpperCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self : int , 
lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ): """simple docstring""" if str(lowerCamelCase_ ).startswith("""mps""" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch_device == """cpu""" UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCamelCase = pipe( """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
343
0
from collections import deque from math import floor from random import random from time import time class a__ : """simple docstring""" def __init__( self ) -> Dict: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple: '''simple docstring''' if self.graph.get(lowercase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: A__ = [[w, v]] if not self.graph.get(lowercase ): A__ = [] def UpperCamelCase ( self ) -> Optional[Any]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase , lowercase ) -> int: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Any: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' A__ = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def UpperCamelCase ( self , lowercase ) -> int: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self , lowercase=-2 ) -> str: '''simple docstring''' A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s A__ = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return sorted_nodes def UpperCamelCase ( self ) -> List[Any]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = 
len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> int: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin class a__ : """simple docstring""" def __init__( self ) -> int: '''simple docstring''' A__ = {} def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist A__ = [[w, v]] # add the other way if self.graph.get(lowercase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist A__ = [[w, u]] def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' if self.graph.get(lowercase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowercase ) # the other way round if self.graph.get(lowercase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowercase ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]: '''simple docstring''' if s == d: return [] A__ = [] A__ = [] if s == -2: A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowercase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return visited def UpperCamelCase ( self , lowercase=-1 ) -> 
str: '''simple docstring''' if c == -1: A__ = floor(random() * 10000 ) + 10 for i in range(lowercase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): A__ = floor(random() * c ) + 1 if n != i: self.add_pair(lowercase , lowercase , 1 ) def UpperCamelCase ( self , lowercase=-2 ) -> Dict: '''simple docstring''' A__ = deque() A__ = [] if s == -2: A__ = list(self.graph )[0] d.append(lowercase ) visited.append(lowercase ) while d: A__ = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def UpperCamelCase ( self , lowercase ) -> Tuple: '''simple docstring''' return len(self.graph[u] ) def UpperCamelCase ( self ) -> Dict: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return list(lowercase ) def UpperCamelCase ( self ) -> int: '''simple docstring''' A__ = [] A__ = [] A__ = list(self.graph )[0] stack.append(lowercase ) visited.append(lowercase ) A__ = -2 A__ = [] A__ = s A__ = False A__ = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: A__ = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): A__ = len(lowercase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) A__ = node[1] break # check if all the children are visited if s == ss: stack.pop() A__ = True if len(lowercase ) != 0: A__ = stack[len(lowercase ) - 1] else: A__ = False indirect_parents.append(lowercase ) A__ = s A__ = ss # check if se have reached the starting point if len(lowercase ) == 0: return False def UpperCamelCase ( self ) -> List[str]: '''simple docstring''' return list(self.graph ) def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]: '''simple docstring''' A__ = time() self.dfs(lowercase , lowercase ) A__ = time() return end - begin def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]: '''simple docstring''' A__ = time() self.bfs(lowercase ) A__ = time() return end - begin
68
from __future__ import annotations def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list: '''simple docstring''' UpperCamelCase = [] UpperCamelCase , UpperCamelCase = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) UpperCamelCase = result + left + right return input_list def lowercase( UpperCamelCase_ ) -> list: '''simple docstring''' if len(UpperCamelCase_ ) <= 1: return input_list UpperCamelCase = list(UpperCamelCase_ ) # iteration for two-way merging UpperCamelCase = 2 while p <= len(UpperCamelCase_ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = i + p - 1 UpperCamelCase = (low + high + 1) // 2 UpperCamelCase = merge(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # final merge of last two parts if p * 2 >= len(UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = merge(UpperCamelCase_ , 0 , UpperCamelCase_ , len(UpperCamelCase_ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": _SCREAMING_SNAKE_CASE = [] else: _SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
343
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "roc_bert" def __init__( self, lowerCAmelCase__=3_0522, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=True, lowerCAmelCase__=0, lowerCAmelCase__="absolute", lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=768, lowerCAmelCase__=910, lowerCAmelCase__=512, lowerCAmelCase__=2_4858, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> List[str]: snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = type_vocab_size snake_case_ = layer_norm_eps snake_case_ = use_cache snake_case_ = enable_pronunciation snake_case_ = enable_shape snake_case_ = pronunciation_embed_dim snake_case_ = pronunciation_vocab_size snake_case_ = shape_embed_dim snake_case_ = shape_vocab_size snake_case_ = concat_input snake_case_ = position_embedding_type snake_case_ = classifier_dropout super().__init__(pad_token_id=lowerCAmelCase__, **lowerCAmelCase__)
69
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
343
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
70
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])

        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
343
0
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has an euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
71
import argparse
import json
from collections import OrderedDict
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    ConditionalDetrConfig,
    ConditionalDetrForObjectDetection,
    ConditionalDetrForSegmentation,
    ConditionalDetrImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )

    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
343
0
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class __snake_case ( _lowercase): snake_case__ : List[Any] = "Speech2TextFeatureExtractor" snake_case__ : Union[str, Any] = "Speech2TextTokenizer" def __init__( self : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] ): """simple docstring""" super().__init__(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : str = False def __call__( self : List[Any] , *__lowerCAmelCase : int , **__lowerCAmelCase : List[str] ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__lowerCAmelCase , **__lowerCAmelCase ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _lowerCamelCase : str = kwargs.pop('''raw_speech''' ) else: _lowerCamelCase : Tuple = kwargs.pop('''audio''' , __lowerCAmelCase ) _lowerCamelCase : Optional[Any] = kwargs.pop('''sampling_rate''' , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = kwargs.pop('''text''' , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0: _lowerCamelCase : List[Any] = args[0] _lowerCamelCase : int = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _lowerCamelCase : List[Any] = self.feature_extractor(__lowerCAmelCase , *__lowerCAmelCase , sampling_rate=__lowerCAmelCase , **__lowerCAmelCase ) if text is not None: _lowerCamelCase : List[Any] = self.tokenizer(__lowerCAmelCase , **__lowerCAmelCase ) if text is None: return inputs elif audio is None: return encodings else: _lowerCamelCase : List[str] = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[Any] , **__lowerCAmelCase : Tuple ): """simple docstring""" return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : int ): """simple docstring""" return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase ) @contextmanager def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _lowerCamelCase : Union[str, Any] = True _lowerCamelCase : Any = self.tokenizer yield _lowerCamelCase : List[str] = self.feature_extractor _lowerCamelCase : Tuple = False
72
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_inputs, noise=noise)

            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_inputs)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_inputs, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
343
0
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
73
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i

            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True

            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)

    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices

    return []
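# --- Illustrative usage sketch (not part of the original file) ---
# A minimal demo of the backtracking search above; the 5-vertex adjacency
# matrix is made up for illustration (a 4-cycle 1-2-3-4 plus pendant vertex 0).
# With 3 colors this should print a valid assignment such as [0, 1, 0, 1, 0];
# an empty list would mean no coloring exists.
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
    ]
    print(color(demo_graph, 3))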
343
0
"""simple docstring""" import warnings from ...utils import is_sklearn_available, requires_backends if is_sklearn_available(): from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef _lowercase = ( '''This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate ''' '''library. You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' ) def _snake_case ( snake_case__ : Dict , snake_case__ : List[str] ): warnings.warn(snake_case__ , snake_case__ ) requires_backends(snake_case__ , 'sklearn' ) return (preds == labels).mean() def _snake_case ( snake_case__ : Any , snake_case__ : Union[str, Any] ): warnings.warn(snake_case__ , snake_case__ ) requires_backends(snake_case__ , 'sklearn' ) A = simple_accuracy(snake_case__ , snake_case__ ) A = fa_score(y_true=snake_case__ , y_pred=snake_case__ ) return { "acc": acc, "f1": fa, "acc_and_f1": (acc + fa) / 2, } def _snake_case ( snake_case__ : str , snake_case__ : Union[str, Any] ): warnings.warn(snake_case__ , snake_case__ ) requires_backends(snake_case__ , 'sklearn' ) A = pearsonr(snake_case__ , snake_case__ )[0] A = spearmanr(snake_case__ , snake_case__ )[0] return { "pearson": pearson_corr, "spearmanr": spearman_corr, "corr": (pearson_corr + spearman_corr) / 2, } def _snake_case ( snake_case__ : Any , snake_case__ : Optional[int] , snake_case__ : str ): warnings.warn(snake_case__ , snake_case__ ) requires_backends(snake_case__ , 'sklearn' ) assert len(snake_case__ ) == len(snake_case__ ), F'Predictions and labels have mismatched lengths {len(snake_case__ )} and {len(snake_case__ )}' if task_name == "cola": return {"mcc": matthews_corrcoef(snake_case__ , snake_case__ )} elif task_name == "sst-2": return {"acc": simple_accuracy(snake_case__ , snake_case__ )} elif task_name == "mrpc": return acc_and_fa(snake_case__ , snake_case__ ) elif task_name == "sts-b": return pearson_and_spearman(snake_case__ , snake_case__ ) elif task_name == "qqp": return acc_and_fa(snake_case__ , snake_case__ ) elif task_name == "mnli": return {"mnli/acc": simple_accuracy(snake_case__ , snake_case__ )} elif task_name == "mnli-mm": return {"mnli-mm/acc": simple_accuracy(snake_case__ , snake_case__ )} elif task_name == "qnli": return {"acc": simple_accuracy(snake_case__ , snake_case__ )} elif task_name == "rte": return {"acc": simple_accuracy(snake_case__ , snake_case__ )} elif task_name == "wnli": return {"acc": simple_accuracy(snake_case__ , snake_case__ )} elif task_name == "hans": return {"acc": simple_accuracy(snake_case__ , snake_case__ )} else: raise KeyError(snake_case__ ) def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ): warnings.warn(snake_case__ , snake_case__ ) requires_backends(snake_case__ , 'sklearn' ) if len(snake_case__ ) != len(snake_case__ ): raise ValueError(F'Predictions and labels have mismatched lengths {len(snake_case__ )} and {len(snake_case__ )}' ) if task_name == "xnli": return {"acc": simple_accuracy(snake_case__ , snake_case__ )} else: raise KeyError(snake_case__ )
74
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
343
0
from io import BytesIO
from typing import List, Union

import requests

from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_decord_available():
    import numpy as np
    from decord import VideoReader

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos: Union[str, List[str]], **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
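# Illustrative usage (added sketch, not part of the original module). Through
# the high-level pipeline API this class is normally reached as below; the
# video path is hypothetical and decord must be installed:
#
#     from transformers import pipeline
#     classifier = pipeline("video-classification")
#     classifier("path/to/video.mp4", top_k=3)
#
# which returns a list of {"score": ..., "label": ...} dicts.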
75
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sol1 import PokerHand

SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_path = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
343
0
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the recurrence seed = (a * seed + c) % m."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        # Advance the internal state; the smallest value generated is 0, the largest is modulo - 1.
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
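# Note (added, not part of the original file): the demo constants 1664525 and
# 1013904223 are the widely cited "Numerical Recipes" LCG multiplier and
# increment, and `2 << 31` equals 2**32, so next_number() yields values in
# [0, 2**32 - 1]. Fixing the seed makes the stream reproducible:
#
#     lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
#     lcg.next_number()  # 1013904223, since (a * 0 + c) % m == c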
76
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
343
0
"""simple docstring""" import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration _UpperCamelCase : Any = 5_00_00 _UpperCamelCase : Any = 50_00 _UpperCamelCase , _UpperCamelCase : List[str] = os.path.split(__file__) _UpperCamelCase : Dict = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def a_ ( _lowerCAmelCase : datasets.Dataset , _lowerCAmelCase : Dict ): '''simple docstring''' for i in range(_lowerCAmelCase ): lowercase__ : int = dataset[i] @get_duration def a_ ( _lowerCAmelCase : datasets.Dataset , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ): '''simple docstring''' for i in range(0 , len(_lowerCAmelCase ) , _lowerCAmelCase ): lowercase__ : Tuple = dataset[i : i + batch_size] @get_duration def a_ ( _lowerCAmelCase : datasets.Dataset , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] ): '''simple docstring''' with dataset.formatted_as(type=_lowerCAmelCase ): for i in range(_lowerCAmelCase ): lowercase__ : Any = dataset[i] @get_duration def a_ ( _lowerCAmelCase : datasets.Dataset , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ): '''simple docstring''' with dataset.formatted_as(type=_lowerCAmelCase ): for i in range(0 , _lowerCAmelCase , _lowerCAmelCase ): lowercase__ : Optional[int] = dataset[i : i + batch_size] def a_ ( ): '''simple docstring''' lowercase__ : Any = {'num examples': SPEED_TEST_N_EXAMPLES} lowercase__ : int = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] lowercase__ : Optional[Any] = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) lowercase__ : List[str] = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) lowercase__ : str = generate_example_dataset( os.path.join(_lowerCAmelCase , 'dataset.arrow' ) , _lowerCAmelCase , num_examples=_lowerCAmelCase , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(_lowerCAmelCase ) ) lowercase__ : Dict = func(_lowerCAmelCase , **_lowerCAmelCase ) print('shuffling dataset' ) lowercase__ : Union[str, Any] = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(_lowerCAmelCase ) ) 
lowercase__ : str = func( _lowerCAmelCase , **_lowerCAmelCase ) with open(_lowerCAmelCase , 'wb' ) as f: f.write(json.dumps(_lowerCAmelCase ).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
77
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")


class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
343
0
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ = logging.get_logger(__name__) snake_case_ = {"""vocab_file""": """vocab.json"""} snake_case_ = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } snake_case_ = {"""mgp-str""": 27} class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self :Dict , lowercase_ :Tuple , lowercase_ :Optional[int]="[GO]" , lowercase_ :Tuple="[GO]" , lowercase_ :Optional[Any]="[s]" , lowercase_ :List[str]="[GO]" , **lowercase_ :int ) -> List[str]: super().__init__( unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , **lowercase_ , ) with open(lowercase_ , encoding='utf-8' ) as vocab_handle: UpperCAmelCase = json.load(lowercase_ ) UpperCAmelCase = {v: k for k, v in self.vocab.items()} @property def UpperCAmelCase__ ( self :List[str] ) -> Any: return len(self.vocab ) def UpperCAmelCase__ ( self :Dict ) -> List[str]: return dict(self.vocab , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] ) -> int: UpperCAmelCase = [] for s in text: char_tokens.extend(lowercase_ ) return char_tokens def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :Union[str, Any] ) -> Dict: return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[int] ) -> Optional[Any]: return self.decoder.get(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(lowercase_ ): logger.error('Vocabulary path ({}) should be a directory'.format(lowercase_ ) ) return UpperCAmelCase = os.path.join( lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) with open(lowercase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowercase_ , ensure_ascii=lowercase_ ) + '\n' ) return (vocab_file,)
78
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")

    args = parser.parse_args()
    main(args)
343
0
from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    # Sort items by value-per-weight ratio, descending, then take whole items
    # greedily and a fraction of the first item that no longer fits.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
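# Illustrative check (added, not part of the original file). The classic
# instance: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# Items rank by value/weight as 6, 5, 4; the first two fit whole and 20/30
# of the third is taken fractionally, for 60 + 100 + 80:
#
#     frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0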
79
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
343
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    # Keep prompting until the raw input can be converted without raising.
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
80
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
343
0
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class __A : """simple docstring""" def __init__( self ) -> Tuple: a ={} def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=1 ) -> Union[str, Any]: if self.graph.get(__A ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: a =[[w, v]] if not self.graph.get(__A ): a =[] def SCREAMING_SNAKE_CASE ( self ) -> int: return list(self.graph ) def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Optional[Any]: if self.graph.get(__A ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__A ) def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> List[Any]: if s == d: return [] a =[] a =[] if s == -2: a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__A ) return visited else: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =ss # check if se have reached the starting point if len(__A ) == 0: return visited def SCREAMING_SNAKE_CASE ( self , __A=-1 ) -> int: if c == -1: a =floor(random() * 1_0000 ) + 10 for i in range(__A ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): a =floor(random() * c ) + 1 if n != i: self.add_pair(__A , __A , 1 ) def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> List[str]: a =deque() a =[] if s == -2: a =list(self.graph )[0] d.append(__A ) visited.append(__A ) while d: a =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE ( self , __A ) -> Optional[Any]: a =0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def SCREAMING_SNAKE_CASE ( self , __A ) -> Tuple: return len(self.graph[u] ) def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> List[Any]: a =[] a =[] if s == -2: a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =s a =[] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =ss # check if se have reached the starting point if len(__A ) == 0: return sorted_nodes def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached 
the starting point if len(__A ) == 0: return list(__A ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return False def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> List[str]: a =time() self.dfs(__A , __A ) a =time() return end - begin def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> int: a =time() self.bfs(__A ) a =time() return end - begin class __A : """simple docstring""" def __init__( self ) -> List[str]: a ={} def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=1 ) -> Dict: # check if the u exists if self.graph.get(__A ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist a =[[w, v]] # add the other way if self.graph.get(__A ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist a =[[w, u]] def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> Any: if self.graph.get(__A ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(__A ) # the other way round if self.graph.get(__A ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(__A ) def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> int: if s == d: return [] a =[] a =[] if s == -2: a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(__A ) return visited else: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =ss # check if se have reached the starting point if len(__A ) == 0: return visited def SCREAMING_SNAKE_CASE ( self , __A=-1 ) -> List[Any]: if c == -1: a =floor(random() * 1_0000 ) + 10 for i in range(__A ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): a =floor(random() * c ) + 1 if n != i: self.add_pair(__A , __A , 1 ) def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> str: a =deque() a =[] if s == -2: a =list(self.graph )[0] d.append(__A ) visited.append(__A ) while d: a =d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def SCREAMING_SNAKE_CASE ( self , __A ) -> str: return len(self.graph[u] ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and 
node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return list(__A ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: a =[] a =[] a =list(self.graph )[0] stack.append(__A ) visited.append(__A ) a =-2 a =[] a =s a =False a =set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: a =s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): a =len(__A ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) a =node[1] break # check if all the children are visited if s == ss: stack.pop() a =True if len(__A ) != 0: a =stack[len(__A ) - 1] else: a =False indirect_parents.append(__A ) a =s a =ss # check if se have reached the starting point if len(__A ) == 0: return False def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return list(self.graph ) def SCREAMING_SNAKE_CASE ( self , __A=-2 , __A=-1 ) -> Optional[int]: a =time() self.dfs(__A , __A ) a =time() return end - begin def SCREAMING_SNAKE_CASE ( self , __A=-2 ) -> Dict: a =time() self.bfs(__A ) a =time() return end - begin
81
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
343
0
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image A__ = ["""text""", """image""", """audio"""] def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = [] for input_type in input_types: if input_type == "text": inputs.append("""Text input""" ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((5_12, 5_12) ) ) elif input_type == "audio": inputs.append(torch.ones(30_00 ) ) elif isinstance(snake_case , snake_case ): inputs.append(create_inputs(snake_case ) ) else: raise ValueError(F'Invalid type requested: {input_type}' ) return inputs def _UpperCAmelCase ( snake_case ): """simple docstring""" _lowerCAmelCase = [] for output in outputs: if isinstance(snake_case , (str, AgentText) ): output_types.append("""text""" ) elif isinstance(snake_case , (Image.Image, AgentImage) ): output_types.append("""image""" ) elif isinstance(snake_case , (torch.Tensor, AgentAudio) ): output_types.append("""audio""" ) else: raise ValueError(F'Invalid output: {output}' ) return output_types @is_tool_test class __lowerCAmelCase : def snake_case ( self ): """simple docstring""" self.assertTrue(hasattr(self.tool , """inputs""" ) ) self.assertTrue(hasattr(self.tool , """outputs""" ) ) _lowerCAmelCase = self.tool.inputs for _input in inputs: if isinstance(_input , _snake_case ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) _lowerCAmelCase = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = create_inputs(self.tool.inputs ) _lowerCAmelCase = self.tool(*_snake_case ) # There is a single output if len(self.tool.outputs ) == 1: _lowerCAmelCase = [outputs] self.assertListEqual(output_types(_snake_case ) , self.tool.outputs ) def snake_case ( self ): """simple docstring""" self.assertTrue(hasattr(self.tool , """description""" ) ) self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) ) self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = create_inputs(self.tool.inputs ) _lowerCAmelCase = self.tool(*_snake_case ) if not isinstance(_snake_case , _snake_case ): _lowerCAmelCase = [outputs] self.assertEqual(len(_snake_case ) , len(self.tool.outputs ) ) for output, output_type in zip(_snake_case , self.tool.outputs ): _lowerCAmelCase = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(_snake_case , _snake_case ) ) def snake_case ( self ): """simple docstring""" _lowerCAmelCase = create_inputs(self.tool.inputs ) _lowerCAmelCase = [] for _input, input_type in zip(_snake_case , self.tool.inputs ): if isinstance(_snake_case , _snake_case ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error _lowerCAmelCase = self.tool(*_snake_case ) if not isinstance(_snake_case , _snake_case ): _lowerCAmelCase = [outputs] self.assertEqual(len(_snake_case ) , len(self.tool.outputs ) )
82
def kinetic_energy(mass: float, velocity: float) -> float:
    '''Return the kinetic energy 0.5 * m * v**2 of a body with non-negative mass.'''
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    # abs() keeps the result independent of the velocity's sign
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
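Two worked values for the kinetic-energy helper above (function name as reconstructed here):

assert kinetic_energy(10, 10) == 500.0  # 0.5 * 10 * 10**2
assert kinetic_energy(2, -3) == 9.0     # the sign of the velocity is irrelevant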
343
0
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm snake_case_ : List[Any] = logging.get_logger(__name__) @dataclass class lowercase__ ( lowercase ): lowercase__ = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self : int ,**lowerCamelCase__ : List[Any] ): '''simple docstring''' for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _UpperCamelCase : List[str] = deprecated_arg[3:] setattr(self ,lowerCamelCase__ ,not kwargs.pop(lowerCamelCase__ ) ) logger.warning( F'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or' F' {positive_arg}={kwargs[positive_arg]}' ) _UpperCamelCase : Optional[Any] = kwargs.pop('torchscript' ,self.torchscript ) _UpperCamelCase : List[str] = kwargs.pop('torch_xla_tpu_print_metrics' ,self.torch_xla_tpu_print_metrics ) _UpperCamelCase : Optional[Any] = kwargs.pop('fp16_opt_level' ,self.fpaa_opt_level ) super().__init__(**lowerCamelCase__ ) lowercase__ = field(default=lowercase , metadata={"""help""": """Trace the models using torchscript"""} ) lowercase__ = field(default=lowercase , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} ) lowercase__ = field( default="""O1""" , metadata={ """help""": ( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """ """See details at https://nvidia.github.io/apex/amp.html""" ) } , ) @cached_property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' requires_backends(self ,['torch'] ) logger.info('PyTorch: setting up devices' ) if not self.cuda: _UpperCamelCase : Any = torch.device('cpu' ) _UpperCamelCase : Union[str, Any] = 0 elif is_torch_tpu_available(): _UpperCamelCase : Optional[Any] = xm.xla_device() _UpperCamelCase : Dict = 0 else: _UpperCamelCase : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) _UpperCamelCase : List[Any] = torch.cuda.device_count() return device, n_gpu @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return is_torch_tpu_available() and self.tpu @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' requires_backends(self ,['torch'] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' requires_backends(self ,['torch'] ) return self._setup_devices[0] @property def UpperCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' requires_backends(self ,['torch'] ) return self._setup_devices[1] @property def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.n_gpu > 0
83
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """trocr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = activation_function UpperCamelCase = max_position_embeddings UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = init_std UpperCamelCase = decoder_layerdrop UpperCamelCase = use_cache UpperCamelCase = scale_embedding UpperCamelCase = use_learned_position_embeddings UpperCamelCase = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
343
0
"""simple docstring""" from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A = None , __A = None , __A = None , __A = None , __A = False , __A = False , __A = None , **__A , ) -> List[str]: lowerCAmelCase_ :List[str] = path_or_paths lowerCAmelCase_ :int = split if split or isinstance(__A , __A ) else """train""" lowerCAmelCase_ :Tuple = features lowerCAmelCase_ :str = cache_dir lowerCAmelCase_ :int = keep_in_memory lowerCAmelCase_ :Tuple = streaming lowerCAmelCase_ :Optional[int] = num_proc lowerCAmelCase_ :Optional[int] = kwargs @abstractmethod def __lowerCAmelCase ( self ) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]: pass class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A = None , __A = None , __A = False , __A = False , __A = None , **__A , ) -> int: lowerCAmelCase_ :List[Any] = features lowerCAmelCase_ :str = cache_dir lowerCAmelCase_ :List[str] = keep_in_memory lowerCAmelCase_ :Union[str, Any] = streaming lowerCAmelCase_ :List[Any] = num_proc lowerCAmelCase_ :List[str] = kwargs @abstractmethod def __lowerCAmelCase ( self ) -> Union[Dataset, IterableDataset]: pass
84
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): __lowerCAmelCase = """swin""" __lowerCAmelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return 1E-4
343
0
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    '''Minimum number of moves to leave exactly one coin on every node.'''
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
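A small worked example for the reconstruction above: the tree [0, 3, 0] needs three moves (the left child pushes two coins up, the root pushes one right). The TreeNode / distribute_coins names follow the reconstruction in this file:

root = TreeNode(0, TreeNode(3), TreeNode(0))
assert distribute_coins(root) == 3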
85
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit_generator() was removed in modern TF; fit() accepts generators directly
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
343
0
"""simple docstring""" from __future__ import annotations def __lowerCAmelCase (_UpperCamelCase ): return len(set(_UpperCamelCase ) ) == len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
86
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        # iterating raises ContainsLoopError as soon as a node repeats
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
343
0
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class snake_case_ ( __A ,__A ): @register_to_config def __init__( self : Any , *, lowercase_ : int = 4 , lowercase_ : int = 7_68 , lowercase_ : int , lowercase_ : Optional[Any] , ) -> Any: super().__init__() lowercase__ : Optional[Any] = nn.Parameter(torch.zeros(lowercase_ ) ) # parameters for additional clip time embeddings lowercase__ : Tuple = nn.Linear(lowercase_ , lowercase_ ) lowercase__ : int = nn.Linear(lowercase_ , lowercase_ ) # parameters for encoder hidden states lowercase__ : Dict = clip_extra_context_tokens lowercase__ : str = nn.Linear( lowercase_ , self.clip_extra_context_tokens * cross_attention_dim ) lowercase__ : Optional[int] = nn.Linear(lowercase_ , lowercase_ ) lowercase__ : Any = nn.LayerNorm(lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , *, lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : str ) -> Any: if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings lowercase__ : List[str] = image_embeddings.shape[0] lowercase__ : Tuple = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) lowercase__ : int = classifier_free_guidance_embeddings.expand( lowercase_ , -1 ) lowercase__ : Tuple = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] lowercase__ : Optional[Any] = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... lowercase__ : Tuple = self.embedding_proj(lowercase_ ) lowercase__ : Union[str, Any] = self.clip_image_embeddings_project_to_time_embeddings(lowercase_ ) lowercase__ : List[str] = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" lowercase__ : Dict = self.clip_extra_context_tokens_proj(lowercase_ ) lowercase__ : Union[str, Any] = clip_extra_context_tokens.reshape(lowercase_ , -1 , self.clip_extra_context_tokens ) lowercase__ : Dict = clip_extra_context_tokens.permute(0 , 2 , 1 ) lowercase__ : Tuple = self.encoder_hidden_states_proj(lowercase_ ) lowercase__ : Tuple = self.text_encoder_hidden_states_norm(lowercase_ ) lowercase__ : List[str] = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
87
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name 
).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
def stooge_sort(arr):
    '''Sort ``arr`` in place with stooge sort and return it.'''
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    '''Recursively stooge-sort the slice arr[i:h + 1].'''
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the slice
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
88
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
343
0
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __magic_name__ ( unittest.TestCase ): def __lowercase ( self : Any ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowercase ( self : int ): torch.manual_seed(0 ) _a : int = UNetaDModel( sample_size=(32, 64) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=('AttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'AttnUpBlock2D') ,) return model @property def __lowercase ( self : Optional[int] ): torch.manual_seed(0 ) _a : Any = UNetaDConditionModel( sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') ,cross_attention_dim=10 ,) return model @property def __lowercase ( self : Any ): torch.manual_seed(0 ) _a : List[str] = AutoencoderKL( sample_size=(128, 64) ,in_channels=1 ,out_channels=1 ,latent_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') ,up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') ,) _a : str = UNetaDModel( sample_size=(64, 32) ,in_channels=1 ,out_channels=1 ,layers_per_block=2 ,block_out_channels=(128, 128) ,down_block_types=('AttnDownBlock2D', 'DownBlock2D') ,up_block_types=('UpBlock2D', 'AttnUpBlock2D') ,) return vqvae, unet @slow def __lowercase ( self : Any ): _a : str = 'cpu' # ensure determinism for the device-dependent torch.Generator _a : int = Mel( x_res=self.dummy_unet.config.sample_size[1] ,y_res=self.dummy_unet.config.sample_size[0] ,) _a : int = DDPMScheduler() _a : List[str] = AudioDiffusionPipeline(vqvae=_UpperCAmelCase ,unet=self.dummy_unet ,mel=_UpperCAmelCase ,scheduler=_UpperCAmelCase ) _a : List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _a : Dict = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) _a : List[Any] = pipe(generator=_UpperCAmelCase ,steps=4 ) _a : int = output.audios[0] _a : Tuple = output.images[0] _a : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) _a : Dict = pipe(generator=_UpperCAmelCase ,steps=4 ,return_dict=_UpperCAmelCase ) _a : Optional[int] = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _a : Dict = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10] _a : Dict = np.frombuffer(image_from_tuple.tobytes() ,dtype='uint8' )[:10] _a : List[Any] = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _a : Any = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] ,y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] ,) _a : List[Any] = DDIMScheduler() _a : List[Any] = self.dummy_vqvae_and_unet _a : Optional[int] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] 
,unet=dummy_vqvae_and_unet[1] ,mel=_UpperCAmelCase ,scheduler=_UpperCAmelCase ) _a : Optional[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) np.random.seed(0 ) _a : Union[str, Any] = np.random.uniform(-1 ,1 ,((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _a : List[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) _a : Optional[int] = pipe(raw_audio=_UpperCAmelCase ,generator=_UpperCAmelCase ,start_step=5 ,steps=10 ) _a : Dict = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _a : str = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10] _a : Any = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _a : Tuple = self.dummy_unet_condition _a : List[Any] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] ,unet=_UpperCAmelCase ,mel=_UpperCAmelCase ,scheduler=_UpperCAmelCase ) _a : int = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) np.random.seed(0 ) _a : Tuple = torch.rand((1, 1, 10) ) _a : Optional[Any] = pipe(generator=_UpperCAmelCase ,encoding=_UpperCAmelCase ) _a : int = output.images[0] _a : Any = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10] _a : Optional[int] = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class __magic_name__ ( unittest.TestCase ): def __lowercase ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase ( self : int ): _a : Tuple = torch_device _a : Tuple = DiffusionPipeline.from_pretrained('teticio/audio-diffusion-ddim-256' ) _a : List[Any] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _a : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 ) _a : Optional[int] = pipe(generator=_UpperCAmelCase ) _a : List[Any] = output.audios[0] _a : Optional[Any] = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _a : int = np.frombuffer(image.tobytes() ,dtype='uint8' )[:10] _a : int = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
89
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ShapEPipeline __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCAmelCase = False @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return 8 @property def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase = PriorTransformer(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**lowerCamelCase_ ) return model def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , ) UpperCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self : int , 
lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ): """simple docstring""" if str(lowerCamelCase_ ).startswith("""mps""" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch_device == """cpu""" UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCamelCase = pipe( """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
__A = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
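# The table above pins each dependency to a version specifier. A minimal
# sketch of how such a table is typically consumed follows: split a pin like
# "torch>=1.4" into a package name and a specifier, then test the installed
# version against it. This is an illustration, not the library's actual
# dependency checker; `parse_pin` and `check_dep` are hypothetical names.
import re
from importlib.metadata import version

from packaging.specifiers import SpecifierSet


def parse_pin(pin: str) -> tuple[str, SpecifierSet]:
    # "jax>=0.2.8,!=0.3.2" -> ("jax", SpecifierSet(">=0.2.8,!=0.3.2"));
    # a bare name like "Pillow" gets an empty specifier that accepts anything.
    match = re.match(r"^([A-Za-z0-9_.-]+)(.*)$", pin)
    return match.group(1), SpecifierSet(match.group(2))


def check_dep(pin: str) -> bool:
    # True when the installed version of the package satisfies the pin.
    name, spec = parse_pin(pin)
    return version(name) in spec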
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort a list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
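# Worked example (a sketch): sorting [5, 1, 4, 2, 3] with the pass widths used
# above. Each pass merges adjacent runs of width p; a final merge stitches the
# leftover tail onto the sorted prefix.
#
#   p = 2: merge adjacent pairs  -> [1, 5, 2, 4, 3]
#   p = 4: merge runs of four    -> [1, 2, 4, 5, 3]
#   final merge with the tail    -> [1, 2, 3, 4, 5]
assert iter_merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]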
"""simple docstring""" import math class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : Dict=0): # a graph with Node 0,1,...,N-1 '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = n SCREAMING_SNAKE_CASE_ : Any = [ [math.inf for j in range(0 , lowercase_)] for i in range(0 , lowercase_) ] # adjacency matrix for weight SCREAMING_SNAKE_CASE_ : str = [ [math.inf for j in range(0 , lowercase_)] for i in range(0 , lowercase_) ] # dp[i][j] stores minimum distance from i to j def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = w def _SCREAMING_SNAKE_CASE ( self : Dict): '''simple docstring''' for k in range(0 , self.n): for i in range(0 , self.n): for j in range(0 , self.n): SCREAMING_SNAKE_CASE_ : List[Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j]) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Dict , lowercase_ : Optional[Any]): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": UpperCAmelCase_ : List[str] = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = num_groups def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = BitModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = BitForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ): 
"""simple docstring""" UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCAmelCase = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(config=lowerCamelCase_ ) for name, module in model.named_modules(): if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def lowerCamelCase_ ( self : int ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): UpperCamelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = 
prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitBackbone,) if is_torch_available() else () __lowerCAmelCase = BitConfig __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self )
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Build Transformer-style sinusoidal timestep embeddings."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
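# Usage sketch (an illustration, not part of the library file above): embed a
# small batch of timesteps into 32-dimensional sinusoidal features, then run
# them through the two-layer projection defined above. The shapes follow
# directly from the definitions; the PRNG seed is arbitrary.
import jax

example_timesteps = jnp.array([0.0, 10.0, 100.0], dtype=jnp.float32)
sinusoidal = get_sinusoidal_embeddings(example_timesteps, embedding_dim=32)  # shape (3, 32)

mlp = FlaxTimestepEmbedding(time_embed_dim=32)
params = mlp.init(jax.random.PRNGKey(0), sinusoidal)
temb = mlp.apply(params, sinusoidal)  # shape (3, 32)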
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = TFResNetModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFResNetModel, 
TFResNetForImageClassification) if is_tf_available() else () __lowerCAmelCase = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = TFResNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ): UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Any ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # A sign change over [a, b] is required for bisection to bracket a root.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
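# Worked check (a sketch): equation(x) = 10 - x * x has roots at +/-sqrt(10),
# roughly +/-3.1623. The final bracket [a, b] always contains a root and has
# width < 0.01 when the loop exits, so the returned midpoint is within 0.01
# of the true root.
import math

root = bisection(0, 6)
assert abs(root - math.sqrt(10)) < 0.01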
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val def lowercase( UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase = """""" if is_panoptic: UpperCamelCase = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase = """resnet101""" if 
"dc5" in model_name: UpperCamelCase = True UpperCamelCase = """panoptic""" in model_name if is_panoptic: UpperCamelCase = 250 else: UpperCamelCase = 91 UpperCamelCase = """huggingface/label-files""" UpperCamelCase = """coco-detection-id2label.json""" UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase = encoding["""pixel_values"""] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() UpperCamelCase = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val # finally, create HuggingFace model and load state dict UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCamelCase = conditional_detr(UpperCamelCase_ ) UpperCamelCase = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() 
parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
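# Example invocation of the conversion script above (a sketch; the script
# filename is a placeholder -- the two flags are exactly the ones defined in
# the argparse block):
#
#   python convert_conditional_detr_checkpoint.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional_detr_resnet50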
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path

import torch

from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter

description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"

    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
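# Usage sketch (an illustration, not part of the module above): write a
# default fp16 config into a temporary directory. `write_basic_config`
# returns the path on success and False when a config file already exists
# at the target location.
if __name__ == "__main__":
    from pathlib import Path
    from tempfile import mkdtemp

    cfg_path = write_basic_config(
        mixed_precision="fp16", save_location=str(Path(mkdtemp()) / "default_config.yaml")
    )
    if cfg_path:
        print(f"accelerate configuration saved at {cfg_path}")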
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = mask_ratio UpperCamelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , 
lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches UpperCamelCase = (self.image_size // self.patch_size) ** 2 UpperCamelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) UpperCamelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self : str ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = outputs_dict[0].numpy() UpperCamelCase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ): UpperCamelCase = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): UpperCamelCase = v.numpy() else: UpperCamelCase = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ ) } UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCamelCase = main_layer_class(lowerCamelCase_ ) UpperCamelCase = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) UpperCamelCase = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" ) model.save(lowerCamelCase_ ) UpperCamelCase = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) UpperCamelCase = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = outputs.last_hidden_state.numpy() UpperCamelCase = 0 else: UpperCamelCase = outputs.logits.numpy() UpperCamelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = after_outputs["""last_hidden_state"""].numpy() UpperCamelCase = 0 else: UpperCamelCase = after_outputs["""logits"""].numpy() UpperCamelCase = 0 UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) UpperCamelCase = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCamelCase = model_class.from_config(model.config ) UpperCamelCase = 
new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> int: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase = ViTMAEConfig() UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
343
0
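A note on the ViT-MAE tests in the record above: they all seed NumPy and pass an explicit `noise` tensor so the otherwise random patch mask is reproducible across forward passes. A minimal sketch of that setup (the sizes here are illustrative assumptions, not taken from the record):

import numpy as np

# Illustrative sizes; ViT-MAE's patch grid has (image_size // patch_size) ** 2 entries.
image_size, patch_size, batch_size = 224, 16, 2
num_patches = (image_size // patch_size) ** 2  # 196 patches for a 224px image

np.random.seed(2)  # fixed seed -> the same mask on every forward pass
noise = np.random.uniform(size=(batch_size, num_patches))

Passing this `noise` to the model, as the tests do, is what makes the random masking deterministic enough to compare outputs between runs.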
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-character bit string to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Convert a non-negative integer to a little-endian 8-character hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for j in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * j : 2 * j + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message into a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT on a 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Left-rotate a 32-bit integer by `shift` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the 32-character MD5 hex digest of `message` as bytes."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
95
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check whether giving `color` to the current vertex conflicts with a colored neighbour."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: color vertices from `index` onward with at most `max_colors` colors."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
343
0
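A quick sanity check for the `md5_me` implementation in the record above: compare its digest against Python's built-in `hashlib` on a classic test vector. This is a usage sketch, not part of the original file; the two prints should match.

import hashlib

message = b"The quick brown fox jumps over the lazy dog"
print(md5_me(message).decode())          # 9e107d9d372bb6826bd81d3542a419d6
print(hashlib.md5(message).hexdigest())  # reference digest from hashlib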
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _snake_case ( lowercase__ , lowercase__=7 ): _lowerCamelCase : List[str] = None if token is not None: _lowerCamelCase : Dict = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''} # The id of a workflow (not of a workflow run) _lowerCamelCase : int = '636036' _lowerCamelCase : str = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' _lowerCamelCase : Optional[int] = requests.get(lowercase__ , headers=lowercase__ ).json() return result["workflow_runs"] def _snake_case ( lowercase__ ): _lowerCamelCase : Optional[Any] = get_daily_ci_runs(lowercase__ ) _lowerCamelCase : List[str] = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": _lowerCamelCase : Optional[Any] = workflow_run['id'] break return workflow_run_id def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): _lowerCamelCase : Optional[int] = get_last_daily_ci_runs(lowercase__ ) if workflow_run_id is not None: _lowerCamelCase : Tuple = get_artifacts_links(worflow_run_id=lowercase__ , token=lowercase__ ) for artifact_name in artifact_names: if artifact_name in artifacts_links: _lowerCamelCase : Dict = artifacts_links[artifact_name] download_artifact( artifact_name=lowercase__ , artifact_url=lowercase__ , output_dir=lowercase__ , token=lowercase__ ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): get_last_daily_ci_artifacts(lowercase__ , lowercase__ , lowercase__ ) _lowerCamelCase : Any = {} for artifact_name in artifact_names: _lowerCamelCase : Dict = os.path.join(lowercase__ , f'''{artifact_name}.zip''' ) if os.path.isfile(lowercase__ ): _lowerCamelCase : Any = {} with zipfile.ZipFile(lowercase__ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase__ ): # read the file with z.open(lowercase__ ) as f: _lowerCamelCase : Tuple = f.read().decode('UTF-8' ) return results
96
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ 
, pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
343
0
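One detail worth pulling out of the LED tokenizer in the record above: unlike `attention_mask`, the `global_attention_mask` is padded with -1 (meaning "local attention"), not 0. A minimal sketch of the right-padding branch:

# Sketch of the LED `_pad` rule for `global_attention_mask` (right padding).
global_attention_mask = [1, 0, 0]  # global attention on the first token
required_length = 5                # length of the padded input_ids
difference = required_length - len(global_attention_mask)
padded = global_attention_mask + [-1] * difference
print(padded)                      # [1, 0, 0, -1, -1]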
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = tempfile.mkdtemp() # fmt: off UpperCamelCase__ :int = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on UpperCamelCase__ :int = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) UpperCamelCase__ :Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] UpperCamelCase__ :Union[str, Any] = {'''unk_token''': '''<unk>'''} UpperCamelCase__ :Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCamelCase__ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase_ ) ) UpperCamelCase__ :Union[str, Any] = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } UpperCamelCase__ :Any = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def lowerCAmelCase__ ( self , **UpperCamelCase_ ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self , **UpperCamelCase_ ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self , **UpperCamelCase_ ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase__ :Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[int] = self.get_tokenizer() UpperCamelCase__ :Dict = self.get_rust_tokenizer() UpperCamelCase__ :Dict = self.get_image_processor() UpperCamelCase__ :Optional[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCamelCase__ :str = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , 
image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCamelCase__ :Any = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase__ :Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCamelCase__ :Optional[Any] = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 ) UpperCamelCase__ :List[str] = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.get_image_processor() UpperCamelCase__ :str = self.get_tokenizer() UpperCamelCase__ :List[Any] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCamelCase__ :str = self.prepare_image_inputs() UpperCamelCase__ :Optional[int] = image_processor(UpperCamelCase_ , return_tensors='''np''' ) UpperCamelCase__ :Dict = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.get_image_processor() UpperCamelCase__ :Optional[Any] = self.get_tokenizer() UpperCamelCase__ :List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCamelCase__ :List[str] = '''lower newer''' UpperCamelCase__ :str = processor(text=UpperCamelCase_ ) UpperCamelCase__ :Any = tokenizer(UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = self.get_image_processor() UpperCamelCase__ :Tuple = self.get_tokenizer() UpperCamelCase__ :List[str] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCamelCase__ :Tuple = '''lower newer''' UpperCamelCase__ :List[Any] = self.prepare_image_inputs() UpperCamelCase__ :List[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises 
when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Any = self.get_image_processor() UpperCamelCase__ :Any = self.get_tokenizer() UpperCamelCase__ :Dict = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCamelCase__ :List[str] = self.prepare_image_inputs() UpperCamelCase__ :Optional[int] = self.prepare_image_inputs() UpperCamelCase__ :List[Any] = processor(images=UpperCamelCase_ , visual_prompt=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = self.get_image_processor() UpperCamelCase__ :Optional[Any] = self.get_tokenizer() UpperCamelCase__ :Optional[int] = CLIPSegProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) UpperCamelCase__ :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase__ :Union[str, Any] = processor.batch_decode(UpperCamelCase_ ) UpperCamelCase__ :Dict = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
97
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _SCREAMING_SNAKE_CASE = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", 
False), ("""AS 3S 4S 8S 2S""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]), ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 2_3), ("""JH 9H TH KH QH""", 2_2), ("""JC KH JS JD JH""", 2_1), ("""KH KC 3S 3H 3D""", 2_0), ("""8C 9C 5C 3C TC""", 1_9), ("""JS QS 9H TS KH""", 1_8), ("""7C 7S KH 2H 7H""", 1_7), ("""3C KH 5D 5S KH""", 1_6), ("""QH 8H KD JH 8S""", 1_5), ("""2D 6D 9D TH 7D""", 1_4), ) def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase( UpperCamelCase_ = 100 ) -> List[Any]: '''simple docstring''' return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: '''simple docstring''' UpperCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) UpperCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand 
== poker_hands[index] def lowercase( ) -> Union[str, Any]: '''simple docstring''' # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase( ) -> str: '''simple docstring''' # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase( ) -> int: '''simple docstring''' # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) UpperCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 376
343
0
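Assuming the `PokerHand` API exercised by the tests in the record above (a constructor taking a five-card string and `compare_with` returning "Win"/"Loss"/"Tie"), a plain straight flush loses to a royal flush exactly as the fixture table records. A small usage sketch, assuming the local `sola` module is importable:

from sola import PokerHand  # local module used by the test file above

hand = PokerHand("2H 3H 4H 5H 6H")   # straight flush
other = PokerHand("KS AS TS QS JS")  # royal flush
print(hand.compare_with(other))      # "Loss", matching the fixture table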
"""simple docstring""" import heapq def a_ ( lowerCamelCase ): UpperCAmelCase__ = [] # for each node and his adjacency list add them and the rank of the node to queue # using heapq module the queue will be filled like a Priority Queue # heapq works with a min priority queue, so I used -1*len(v) to build it for key, value in graph.items(): # O(log(n)) heapq.heappush(lowerCamelCase , [-1 * len(lowerCamelCase ), (key, value)] ) # chosen_vertices = set of chosen vertices UpperCAmelCase__ = set() # while queue isn't empty and there are still edges # (queue[0][0] is the rank of the node with max rank) while queue and queue[0][0] != 0: # extract vertex with max rank from queue and add it to chosen_vertices UpperCAmelCase__ = heapq.heappop(lowerCamelCase )[1][0] chosen_vertices.add(lowerCamelCase ) # Remove all arcs adjacent to argmax for elem in queue: # if v haven't adjacent node, skip if elem[0] == 0: continue # if argmax is reachable from elem # remove argmax from elem's adjacent list and update his rank if argmax in elem[1][1]: UpperCAmelCase__ = elem[1][1].index(lowerCamelCase ) del elem[1][1][index] elem[0] += 1 # re-order the queue heapq.heapify(lowerCamelCase ) return chosen_vertices if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase__ : Optional[int] = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
98
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
343
0
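The XLNet config above enforces that `d_model` is divisible by `n_head` and derives the per-head width instead of accepting an inconsistent `d_head`. The arithmetic, in isolation:

d_model, n_head = 1024, 16    # the config's defaults
assert d_model % n_head == 0  # otherwise the config raises ValueError
d_head = d_model // n_head    # 64; any explicit d_head kwarg must equal this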
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata lowercase : Tuple = """""" if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""): class A__ ( tr.AbstractTransform ): """simple docstring""" def __init__( self , lowercase = " ") -> Tuple: '''simple docstring''' a__ : Tuple = sentence_delimiter def __lowercase ( self , lowercase) -> Optional[int]: '''simple docstring''' return list(lowercase) def __lowercase ( self , lowercase) -> Dict: '''simple docstring''' a__ : Tuple = [] for sent_idx, sentence in enumerate(lowercase): chars.extend(self.process_string(lowercase)) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(lowercase) - 1: chars.append(self.sentence_delimiter) return chars lowercase : Union[str, Any] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: lowercase : List[str] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) lowercase : List[Any] = """\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } """ lowercase : Optional[int] = """\ Character error rate (CER) is a common metric of the performance of an automatic speech recognition system. CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information. Character error rate can be computed as: CER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct characters, N is the number of characters in the reference (N=S+D+C). CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the performance of the ASR system with a CER of 0 being a perfect score. """ lowercase : Optional[Any] = """ Computes CER score of transcribed segments against references. Args: references: list of references for each speech input. predictions: list of transcribtions to score. concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result. 
Returns: (float): the character error rate Examples: >>> predictions = [\"this is the prediction\", \"there is an other sample\"] >>> references = [\"this is the reference\", \"there is another one\"] >>> cer = datasets.load_metric(\"cer\") >>> cer_score = cer.compute(predictions=predictions, references=references) >>> print(cer_score) 0.34146341463414637 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): """simple docstring""" def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence'), 'references': datasets.Value('string' , id='sequence'), }) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[ 'https://en.wikipedia.org/wiki/Word_error_rate', 'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates', ] , ) def __lowercase ( self , lowercase , lowercase , lowercase=False) -> Any: '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , )["wer"] a__ : Optional[int] = 0 a__ : str = 0 for prediction, reference in zip(lowercase , lowercase): a__ : Optional[int] = jiwer.compute_measures( lowercase , lowercase , truth_transform=lowercase , hypothesis_transform=lowercase , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
99
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
343
0
"""simple docstring""" from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = cva.getAffineTransform(UpperCamelCase_ , UpperCamelCase_ ) return cva.warpAffine(UpperCamelCase_ , UpperCamelCase_ , (rows, cols) ) if __name__ == "__main__": # read original image __magic_name__ = cva.imread( str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg") ) # turn image in gray scale value __magic_name__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __magic_name__, __magic_name__ = gray_img.shape # set different points to rotate image __magic_name__ = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __magic_name__ = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __magic_name__ = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __magic_name__ = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __magic_name__ = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __magic_name__ = plt.figure(1) __magic_name__ = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, "gray") plt.title(titles[i]) plt.axis("off") plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
100
import argparse
import json
from typing import List

from ltp import LTP
from transformers import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)
        or (cp >= 0x20000 and cp <= 0x2A6DF)
        or (cp >= 0x2A700 and cp <= 0x2B73F)
        or (cp >= 0x2B740 and cp <= 0x2B81F)
        or (cp >= 0x2B820 and cp <= 0x2CEAF)
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)
    ):
        return True
    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
    )
    parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
    parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()

    main(args)
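# A toy illustration of `add_sub_symbol` above (a sketch; it assumes the
# functions defined in this file). The LTP word "身高" spans two BERT tokens,
# so the second token becomes a "##" continuation, and its position (index 2)
# is exactly what prepare_ref records for whole-word masking:
#
#   add_sub_symbol(["[CLS]", "身", "高", "180", "[SEP]"], {"身高"})
#   -> ["[CLS]", "身", "##高", "180", "[SEP]"]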
343
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
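# _LazyModule is internal to transformers, but the idea is easy to sketch: a
# module object that defers the real import until an exported name is first
# touched. A simplified, self-contained version (not the actual implementation):
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Reverse map: exported name -> submodule that defines it.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, attr):
        # Only called for attributes not yet set on the module.
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}"), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value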
101
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 1 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase = (self.patch_size, self.patch_size) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ ) 
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) UpperCamelCase = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase_ )
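# The JIT enabled/disabled comparison above generalizes to any pure function.
# A minimal standalone sketch of the same consistency check (assumes only jax):
import jax
import jax.numpy as jnp


def _toy_fn(x):
    return jnp.tanh(x) * 2.0


_x = jnp.ones((4, 4))
_jitted = jax.jit(_toy_fn)(_x)
with jax.disable_jit():
    _eager = jax.jit(_toy_fn)(_x)  # executes eagerly inside disable_jit()
assert jnp.allclose(_jitted, _eager)  # outputs agree up to numerical noise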
343
0
"""simple docstring""" from statistics import mean import numpy as np def lowercase ( _snake_case : list , _snake_case : list , _snake_case : list , _snake_case : int ) ->list: """simple docstring""" __snake_case : List[str] = 0 # Number of processes finished __snake_case : List[str] = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. __snake_case : Optional[int] = [0] * no_of_process # List to include calculation results __snake_case : Tuple = [0] * no_of_process # Sort by arrival time. __snake_case : int = [burst_time[i] for i in np.argsort(_snake_case )] __snake_case : str = [process_name[i] for i in np.argsort(_snake_case )] arrival_time.sort() while no_of_process > finished_process_count: __snake_case : List[str] = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: __snake_case : List[Any] = arrival_time[i] __snake_case : Union[str, Any] = 0 # Index showing the location of the process being performed __snake_case : Dict = 0 # Saves the current response ratio. __snake_case : List[Any] = 0 for i in range(0 , _snake_case ): if finished_process[i] == 0 and arrival_time[i] <= current_time: __snake_case : int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: __snake_case : List[str] = temp __snake_case : Any = i # Calculate the turn around time __snake_case : int = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. __snake_case : Union[str, Any] = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase ( _snake_case : list , _snake_case : list , _snake_case : list , _snake_case : int ) ->list: """simple docstring""" __snake_case : Optional[int] = [0] * no_of_process for i in range(0 , _snake_case ): __snake_case : int = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": SCREAMING_SNAKE_CASE : Union[str, Any] = 5 SCREAMING_SNAKE_CASE : Dict = ["""A""", """B""", """C""", """D""", """E"""] SCREAMING_SNAKE_CASE : List[str] = [1, 2, 3, 4, 5] SCREAMING_SNAKE_CASE : Union[str, Any] = [1, 2, 3, 4, 5] SCREAMING_SNAKE_CASE : List[Any] = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) SCREAMING_SNAKE_CASE : Union[str, Any] = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print("""Process name \tArrival time \tBurst time \tTurn around time \tWaiting time""") for i in range(0, no_of_process): print( F'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t' F'{turn_around_time[i]}\t\t\t{waiting_time[i]}' ) print(F'average waiting time : {mean(waiting_time):.5f}') print(F'average turn around time : {mean(turn_around_time):.5f}')
102
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = range_bbox def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase = bbox[i, j, 3] UpperCamelCase = bbox[i, j, 1] UpperCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase = bbox[i, j, 2] UpperCamelCase = bbox[i, j, 0] UpperCamelCase = t UpperCamelCase = None if self.use_input_mask: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, 
"""zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): """simple docstring""" return True def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = LiltModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ ) UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ ) UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ ) UpperCamelCase = torch.Size([1, 2, 768] ) UpperCamelCase = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
343
0
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and seventy-five.\n\n"
            "Spiritual revelations were conceded to England at that favoured period, as at this."
        )
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
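# The assertions above pin truncate_or_pad down completely. A reference
# implementation consistent with them (a sketch, not necessarily the code in
# utils_summarization):
def truncate_or_pad_reference(sequence, block_size, pad_token_id):
    """Clip to block_size, or right-pad with pad_token_id up to block_size."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))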
103
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
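# The crop_pct knob follows the timm-style evaluation transform: resize the
# shortest edge to size / crop_pct, then center-crop to crop_size. A rough
# standalone sketch under that assumption (not PoolFormerImageProcessor itself):
from PIL import Image


def resize_and_center_crop(img: Image.Image, shortest_edge: int, crop_size: int, crop_pct: float) -> Image.Image:
    # Scale so the shortest edge lands at shortest_edge / crop_pct.
    target = int(shortest_edge / crop_pct)
    w, h = img.size
    scale = target / min(w, h)
    img = img.resize((round(w * scale), round(h * scale)))
    # Center crop to crop_size x crop_size.
    w, h = img.size
    left, top = (w - crop_size) // 2, (h - crop_size) // 2
    return img.crop((left, top, left + crop_size, top + crop_size))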
343
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class lowercase_ : """simple docstring""" def __init__( self : Tuple ,lowercase__ : int ,lowercase__ : str=1_3 ,lowercase__ : Union[str, Any]=7 ,lowercase__ : List[str]=True ,lowercase__ : List[str]=True ,lowercase__ : List[str]=True ,lowercase__ : Any=True ,lowercase__ : str=9_9 ,lowercase__ : List[Any]=6_4 ,lowercase__ : Tuple=5 ,lowercase__ : List[Any]=4 ,lowercase__ : Any=3_7 ,lowercase__ : str="gelu" ,lowercase__ : Any=0.1 ,lowercase__ : Union[str, Any]=0.1 ,lowercase__ : List[Any]=5_1_2 ,lowercase__ : int=1_6 ,lowercase__ : Dict=2 ,lowercase__ : Dict=0.0_2 ,lowercase__ : str=3 ,lowercase__ : Optional[Any]=4 ,lowercase__ : int=None ,): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope __lowercase = vocab_size - 1 def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) __lowercase = self.get_config() return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : int ): return GPTNeoXConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=lowercase__ ,initializer_range=self.initializer_range ,pad_token_id=self.pad_token_id ,) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase , __lowercase , __lowercase , __lowercase = self.prepare_config_and_inputs() __lowercase = True return config, input_ids, input_mask, token_labels def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Tuple ,lowercase__ : Dict ,lowercase__ : Any ): __lowercase = GPTNeoXModel(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) __lowercase = model(lowercase__ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Any ,lowercase__ : Union[str, Any] ,lowercase__ : int ): __lowercase = True __lowercase = GPTNeoXModel(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : Optional[int] ,lowercase__ : Dict ): __lowercase = GPTNeoXForCausalLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Optional[Any] ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[Any] ,lowercase__ : Optional[Any] ): __lowercase = self.num_labels __lowercase = GPTNeoXForQuestionAnswering(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : Dict ,lowercase__ : List[str] ,lowercase__ : str ): __lowercase = self.num_labels __lowercase = GPTNeoXForSequenceClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ,lowercase__ : str ,lowercase__ : Union[str, Any] ,lowercase__ : Optional[int] ): __lowercase = self.num_labels __lowercase = GPTNeoXForTokenClassification(lowercase__ ) model.to(lowercase__ ) model.eval() __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,labels=lowercase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : str ,lowercase__ : Optional[Any] ,lowercase__ : Optional[int] ): __lowercase = True __lowercase = GPTNeoXForCausalLM(config=lowercase__ ) model.to(lowercase__ ) model.eval() # first forward pass __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,use_cache=lowercase__ ) __lowercase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size ) __lowercase = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and __lowercase = torch.cat([input_ids, next_tokens] ,dim=-1 ) __lowercase = torch.cat([input_mask, next_mask] ,dim=-1 ) __lowercase = model(lowercase__ ,attention_mask=lowercase__ ,output_hidden_states=lowercase__ ) __lowercase = output_from_no_past['''hidden_states'''][0] __lowercase = model( lowercase__ ,attention_mask=lowercase__ ,past_key_values=lowercase__ ,output_hidden_states=lowercase__ ,)['''hidden_states'''][0] # select random slice __lowercase = ids_tensor((1,) ,output_from_past.shape[-1] ).item() __lowercase = output_from_no_past[:, -3:, 
random_slice_idx].detach() __lowercase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-3 ) ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs __lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowercase_ (lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : Union[str, Any] = (GPTNeoXForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE : Union[str, Any] = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : Optional[int] = False SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : Dict = False def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = GPTNeoXModelTester(self ) __lowercase = ConfigTester(self ,config_class=lowercase__ ,hidden_size=6_4 ,num_attention_heads=8 ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ): self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ): # This regression test was failing with PyTorch < 1.3 __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_decoder() __lowercase = None self.model_tester.create_and_check_model_as_decoder(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase , __lowercase , __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase__ ,lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Tuple ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*lowercase__ ) @unittest.skip(reason='''Feed forward chunking is not implemented''' ) def SCREAMING_SNAKE_CASE ( self : List[str] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Union[str, Any] ): __lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common() __lowercase = ids_tensor([1, 1_0] ,config.vocab_size ) __lowercase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights __lowercase = GPTNeoXModel(lowercase__ ) original_model.to(lowercase__ ) original_model.eval() __lowercase = original_model(lowercase__ ).last_hidden_state __lowercase = original_model(lowercase__ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights __lowercase = {'''type''': scaling_type, '''factor''': 1_0.0} __lowercase = GPTNeoXModel(lowercase__ ) scaled_model.to(lowercase__ ) scaled_model.eval() __lowercase = scaled_model(lowercase__ ).last_hidden_state __lowercase = scaled_model(lowercase__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase__ ,lowercase__ ,atol=1e-5 ) ) @require_torch class lowercase_ (unittest.TestCase ): """simple docstring""" @slow def SCREAMING_SNAKE_CASE ( self : List[Any] ): __lowercase = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' ) for checkpointing in [True, False]: __lowercase = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowercase__ ) __lowercase = tokenizer('''My favorite food is''' ,return_tensors='''pt''' ).to(lowercase__ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 __lowercase = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure''' __lowercase = model.generate(**lowercase__ ,do_sample=lowercase__ ,max_new_tokens=2_0 ) __lowercase = tokenizer.batch_decode(lowercase__ )[0] self.assertEqual(lowercase__ ,lowercase__ )
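# Both scaling types exercised above reinterpret rotary position indices. For
# the "linear" case, positions are simply divided by the factor before the
# sin/cos tables are built; a minimal sketch (not GPT-NeoX's implementation):
import torch


def rope_tables(seq_len: int, dim: int, base: float = 10_000.0, scaling_factor: float = 1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    positions = torch.arange(seq_len).float() / scaling_factor  # linear scaling
    angles = torch.outer(positions, inv_freq)
    return angles.cos(), angles.sin()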
104
def kinetic_energy(mass: float, velocity: float) -> float:
    """
    Calculate the kinetic energy of a body: KE = 1/2 * mass * velocity**2.

    Mass must be non-negative; the speed enters squared, so the sign of the
    velocity does not matter.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
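# Example values, computed from the function as defined above:
#   kinetic_energy(10, 10) -> 500.0   (0.5 * 10 * 10**2)
#   kinetic_energy(2, -3)  -> 9.0     (sign of velocity is irrelevant)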
343
0
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset a : Tuple = '''bert-base-cased''' a : List[Any] = '''google/pegasus-xsum''' a : List[Any] = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] a : Dict = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] a : str = '''patrickvonplaten/t5-tiny-random''' a : List[str] = '''sshleifer/bart-tiny-random''' a : int = '''sshleifer/tiny-mbart''' a : int = '''sshleifer/tiny-marian-en-de''' def _SCREAMING_SNAKE_CASE ( _lowercase : Path , _lowercase : list ) ->Optional[Any]: '''simple docstring''' a : str = "\n".join(_lowercase ) Path(_lowercase ).open("w" ).writelines(_lowercase ) def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[Any]: '''simple docstring''' for split in ["train", "val", "test"]: _dump_articles(os.path.join(_lowercase , F"""{split}.source""" ) , _lowercase ) _dump_articles(os.path.join(_lowercase , F"""{split}.target""" ) , _lowercase ) return tmp_dir class __UpperCamelCase ( a__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def __a ( self , lowerCAmelCase__ ) -> Optional[Any]: a : List[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) a : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) a : Tuple = max(len(tokenizer.encode(lowerCAmelCase__ ) ) for a in ARTICLES ) a : Dict = max(len(tokenizer.encode(lowerCAmelCase__ ) ) for a in SUMMARIES ) a : int = 4 a : Dict = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated a, a : str = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. a : Union[str, Any] = SeqaSeqDataset( lowerCAmelCase__ , data_dir=lowerCAmelCase__ , type_path="train" , max_source_length=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , ) a : List[Any] = DataLoader(lowerCAmelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place a : Optional[int] = shift_tokens_right(batch["labels"] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def __a ( self , lowerCAmelCase__ ) -> Dict: a : Tuple = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) a : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) a : Optional[int] = max(len(tokenizer.encode(lowerCAmelCase__ ) ) for a in ARTICLES ) a : str = max(len(tokenizer.encode(lowerCAmelCase__ ) ) for a in SUMMARIES ) a : str = 4 a : Dict = LegacySeqaSeqDataset( lowerCAmelCase__ , data_dir=lowerCAmelCase__ , type_path="train" , max_source_length=20 , max_target_length=lowerCAmelCase__ , ) a : str = DataLoader(lowerCAmelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def __a ( self ) -> Dict: a : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25" ) a : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) a : Any = tmp_dir.joinpath("train.source" ).open().readlines() a : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(lowerCAmelCase__ , lowerCAmelCase__ , 128 , lowerCAmelCase__ ) a : List[str] = {x.name for x in tmp_dir.iterdir()} a : Tuple = {x.name for x in save_dir.iterdir()} a : Tuple = save_dir.joinpath("train.source" ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ) assert len(lowerCAmelCase__ ) == 1 assert len(packed_examples[0] ) == sum(len(lowerCAmelCase__ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="This test requires fairseq" ) def __a ( self ) -> int: if not FAIRSEQ_AVAILABLE: return a, a, a : List[str] = self._get_dataset(max_len=64 ) a : str = 64 a : Optional[int] = ds.make_dynamic_sampler(lowerCAmelCase__ , required_batch_size_multiple=lowerCAmelCase__ ) a : Dict = [len(lowerCAmelCase__ ) for x in batch_sampler] assert len(set(lowerCAmelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) # no dropped or added examples a : str = DataLoader(lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=ds.collate_fn , num_workers=2 ) a : Optional[Any] = [] a : Optional[int] = [] for batch in data_loader: a : int = batch["input_ids"].shape a : Dict = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple a : Tuple = 
np.product(batch["input_ids"].shape ) num_src_per_batch.append(lowerCAmelCase__ ) if num_src_tokens > (max_tokens * 1.1): failures.append(lowerCAmelCase__ ) assert num_src_per_batch[0] == max(lowerCAmelCase__ ) if failures: raise AssertionError(f"""too many tokens in {len(lowerCAmelCase__ )} batches""" ) def __a ( self ) -> Any: a, a, a : Optional[int] = self._get_dataset(max_len=512 ) a : Optional[int] = 2 a : List[str] = ds.make_sortish_sampler(lowerCAmelCase__ , shuffle=lowerCAmelCase__ ) a : Union[str, Any] = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=ds.collate_fn , num_workers=2 ) a : Any = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowerCAmelCase__ ) a : List[Any] = tokenizer.pad_token_id def count_pad_tokens(lowerCAmelCase__ , lowerCAmelCase__="input_ids" ): return [batch[k].eq(lowerCAmelCase__ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowerCAmelCase__ , k="labels" ) ) < sum(count_pad_tokens(lowerCAmelCase__ , k="labels" ) ) assert sum(count_pad_tokens(lowerCAmelCase__ ) ) < sum(count_pad_tokens(lowerCAmelCase__ ) ) assert len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__=1000 , lowerCAmelCase__=128 ) -> Dict: if os.getenv("USE_REAL_DATA" , lowerCAmelCase__ ): a : Tuple = "examples/seq2seq/wmt_en_ro" a : Optional[Any] = max_len * 2 * 64 if not Path(lowerCAmelCase__ ).joinpath("train.len" ).exists(): save_len_file(lowerCAmelCase__ , lowerCAmelCase__ ) else: a : Tuple = "examples/seq2seq/test_data/wmt_en_ro" a : Optional[int] = max_len * 4 save_len_file(lowerCAmelCase__ , lowerCAmelCase__ ) a : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase__ ) a : Tuple = SeqaSeqDataset( lowerCAmelCase__ , data_dir=lowerCAmelCase__ , type_path="train" , max_source_length=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , ) return ds, max_tokens, tokenizer def __a ( self ) -> Optional[Any]: a, a, a : Tuple = self._get_dataset() a : List[str] = set(DistributedSortishSampler(lowerCAmelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=lowerCAmelCase__ ) ) a : Dict = set(DistributedSortishSampler(lowerCAmelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=lowerCAmelCase__ ) ) assert idsa.intersection(lowerCAmelCase__ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def __a ( self , lowerCAmelCase__ ) -> Dict: a : Optional[int] = AutoTokenizer.from_pretrained(lowerCAmelCase__ , use_fast=lowerCAmelCase__ ) if tok_name == MBART_TINY: a : Optional[Any] = SeqaSeqDataset( lowerCAmelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , src_lang="EN" , tgt_lang="FR" , ) a : str = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: a : Any = SeqaSeqDataset( lowerCAmelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="train" , max_source_length=4 , max_target_length=8 , ) a : Optional[int] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowerCAmelCase__ ) == 1 if tok_name == BART_TINY else len(lowerCAmelCase__ ) == 0
code_codestyle: 105
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
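# What the attribute_map above buys, in one hedged sketch (values chosen for
# this note, not from the original file): generic code can read the decoder
# fields through the common config names, because PretrainedConfig resolves
# mapped attribute lookups.
config = TrOCRConfig(d_model=256, decoder_layers=6, decoder_attention_heads=8)
assert config.hidden_size == 256         # resolved via attribute_map -> d_model
assert config.num_hidden_layers == 6     # -> decoder_layers
assert config.num_attention_heads == 8   # -> decoder_attention_heads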
style_context_codestyle: 343
label: 0
"""simple docstring""" from __future__ import annotations def __SCREAMING_SNAKE_CASE ( A_ , A_ ): if partitions <= 0: raise ValueError('''partitions must be a positive number!''' ) if partitions > number_of_bytes: raise ValueError('''partitions can not > number_of_bytes!''' ) lowerCAmelCase__ : Optional[Any] = number_of_bytes // partitions lowerCAmelCase__ : Union[str, Any] = [] for i in range(A_ ): lowerCAmelCase__ : int = i * bytes_per_partition + 1 lowerCAmelCase__ : Optional[Any] = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 106
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
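# A hedged sketch of the derived fields (default values assumed): hidden_size is
# computed from embed_dim and depths rather than passed in, and the attribute_map
# redirects the generic names.
config = SwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2]
assert config.hidden_size == 96 * 2 ** (4 - 1)  # 768 channels after the last stage
assert config.num_hidden_layers == 4            # attribute_map -> num_layers == len(depths)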
style_context_codestyle: 343
label: 0
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputting the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
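# A few extra illustrative inputs (chosen for this note, not from the original
# script) showing the '.'/'*' semantics the DP table encodes; runtime and space
# are O(len(input_string) * len(pattern)):
assert match_pattern("ab", ".*")                       # '.*' matches any string
assert match_pattern("aab", "c*a*b")                   # 'c*' matches empty, 'a*' matches 'aa'
assert not match_pattern("aa", "a")                    # the pattern must cover the whole string
assert not match_pattern("mississippi", "mis*is*p*.")  # 'p*.' cannot absorb the tail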
code_codestyle: 107
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
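# Portability note (version-dependent, stated as an assumption): fit_generator
# is deprecated in TensorFlow 2.x and removed in newer releases; fit accepts the
# ImageDataGenerator iterators directly. The equivalent modern call, replacing
# the fit_generator line above, would be:
#
#     classifier.fit(training_set, steps_per_epoch=5, epochs=30, validation_data=test_set)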
style_context_codestyle: 343
label: 0
"""simple docstring""" lowerCAmelCase__ = 8.314462 # Unit - J mol-1 K-1 def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ): '''simple docstring''' if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ): '''simple docstring''' if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
code_codestyle: 108
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
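# The visited list above costs O(n) extra space and O(n^2) time from the `in`
# checks. A constant-space alternative (Floyd's tortoise and hare), added here
# for comparison against the same Node class -- not part of the original module:
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:  # the fast pointer laps the slow one only inside a cycle
            return True
    return False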
style_context_codestyle: 343
label: 0
"""simple docstring""" import inspect import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import is_torch_available from transformers.testing_utils import is_flaky, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin A: Optional[Any] = 1E-4 if is_torch_available(): import torch from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder @require_torch class SCREAMING_SNAKE_CASE__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=19 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=[1, 2, 3, 4, 5] , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=5 , ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] = d_model UpperCAmelCase : Optional[int] = parent UpperCAmelCase : Tuple = batch_size UpperCAmelCase : Optional[int] = prediction_length UpperCAmelCase : Optional[Any] = context_length UpperCAmelCase : Optional[int] = cardinality UpperCAmelCase : Union[str, Any] = num_time_features UpperCAmelCase : Union[str, Any] = lags_sequence UpperCAmelCase : Dict = embedding_dimension UpperCAmelCase : Dict = is_training UpperCAmelCase : List[Any] = hidden_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : List[Any] = intermediate_size UpperCAmelCase : Optional[Any] = hidden_act UpperCAmelCase : str = hidden_dropout_prob UpperCAmelCase : str = attention_probs_dropout_prob UpperCAmelCase : Union[str, Any] = context_length UpperCAmelCase : int = prediction_length + label_length UpperCAmelCase : Any = label_length UpperCAmelCase : str = moving_average UpperCAmelCase : int = autocorrelation_factor def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' return AutoformerConfig( d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' UpperCAmelCase : List[Any] = config.context_length + max(config.lags_sequence ) UpperCAmelCase : Tuple = ids_tensor([self.batch_size, 1] , config.cardinality[0] ) UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] ) UpperCAmelCase : str = 
floats_tensor([self.batch_size, _past_length] ) UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, _past_length] ) > 0.5 # decoder inputs UpperCAmelCase : str = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] ) UpperCAmelCase : Dict = floats_tensor([self.batch_size, config.prediction_length] ) UpperCAmelCase : Dict = { """past_values""": past_values, """static_categorical_features""": static_categorical_features, """past_time_features""": past_time_features, """past_observed_mask""": past_observed_mask, """future_time_features""": future_time_features, """future_values""": future_values, } return inputs_dict def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Tuple = self.get_config() UpperCAmelCase : List[str] = self.prepare_autoformer_inputs_dict(_SCREAMING_SNAKE_CASE ) return config, inputs_dict def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' UpperCAmelCase : List[Any] = AutoformerModel(config=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ).eval() UpperCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = outputs.encoder_last_hidden_state UpperCAmelCase : Optional[Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : Any = model.get_encoder() encoder.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = AutoformerEncoder.from_pretrained(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = model.create_network_inputs(**_SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase : Tuple = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] ) UpperCAmelCase : int = torch.cat( (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , ) UpperCAmelCase : Optional[Any] = encoder(inputs_embeds=_SCREAMING_SNAKE_CASE )[0] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) UpperCAmelCase : str = ( torch.mean(transformer_inputs[:, : config.context_length, ...] 
, dim=1 ) .unsqueeze(1 ) .repeat(1 , config.prediction_length , 1 ) ) UpperCAmelCase : Optional[int] = torch.zeros( [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , ) UpperCAmelCase : str = torch.cat( ( torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) UpperCAmelCase : Optional[int] = torch.cat( ( torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ), feature[:, config.context_length - config.label_length :, ...], ) , dim=-1 , ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase : str = model.get_decoder() decoder.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = AutoformerDecoder.from_pretrained(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = decoder( trend=_SCREAMING_SNAKE_CASE , inputs_embeds=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Any = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else () __lowerCAmelCase : Any = (AutoformerForPrediction,) if is_torch_available() else () __lowerCAmelCase : int = {'feature-extraction': AutoformerModel} if is_torch_available() else {} __lowerCAmelCase : List[Any] = False __lowerCAmelCase : Tuple = False __lowerCAmelCase : Any = False __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : Optional[Any] = False __lowerCAmelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int = AutoformerModelTester(self ) UpperCAmelCase : Tuple = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE ) self.assertEqual(info["""missing_keys"""] , [] ) def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Model has no tokens embeddings""" ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] = inspect.signature(getattr(_SCREAMING_SNAKE_CASE , """forward""" ) ) # The main input is the name of the argument after `self` UpperCAmelCase : Any = list(model_signature.parameters.keys() )[1] self.assertEqual(AutoformerModel.main_input_name , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : Any = [*signature.parameters.keys()] UpperCAmelCase : Optional[int] = [ """past_values""", """past_time_features""", """past_observed_mask""", """static_categorical_features""", """static_real_features""", """future_values""", """future_time_features""", ] if model.__class__.__name__ in ["AutoformerForPrediction"]: expected_arg_names.append("""future_observed_mask""" ) expected_arg_names.extend( [ """decoder_attention_mask""", """head_mask""", """decoder_head_mask""", """cross_attn_head_mask""", """encoder_outputs""", """past_key_values""", """output_hidden_states""", """output_attentions""", """use_cache""", """return_dict""", ] ) self.assertListEqual(arg_names[: len(_SCREAMING_SNAKE_CASE )] , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[Any] = True UpperCAmelCase : List[str] = getattr(self.model_tester , """seq_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Any = getattr(self.model_tester , """decoder_seq_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = getattr(self.model_tester , """encoder_seq_length""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = getattr(self.model_tester , """d_model""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = getattr(self.model_tester , """num_attention_heads""" , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[int] = d_model // num_attention_heads for model_class in self.all_model_classes: UpperCAmelCase : Any = True UpperCAmelCase : List[Any] = False UpperCAmelCase : Union[str, Any] = True UpperCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : List[str] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase : str = True UpperCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Any = outputs.encoder_attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) UpperCAmelCase : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : int = 7 if "last_hidden_state" in outputs: correct_outlen += 1 if "trend" in outputs: correct_outlen += 1 if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned if "loss" in outputs: correct_outlen += 1 if "params" in outputs: correct_outlen += 1 self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # decoder attentions UpperCAmelCase : str = outputs.decoder_attentions 
self.assertIsInstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # cross attentions UpperCAmelCase : Dict = outputs.cross_attentions self.assertIsInstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , ) # Check attention is always last and order is fine UpperCAmelCase : int = True UpperCAmelCase : Optional[Any] = True UpperCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : Dict = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(out_len + 2 , len(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , ) @is_flaky() def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' super().test_retain_grad_hidden_states_attentions() def _snake_case ( UpperCamelCase : Any="train-batch.pt" ): UpperCAmelCase : List[str] = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=UpperCamelCase , repo_type="""dataset""" ) UpperCAmelCase : List[str] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) return batch @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : str = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = prepare_batch() with torch.no_grad(): UpperCAmelCase : Any = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0] UpperCAmelCase : Union[str, Any] = torch.Size( (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = torch.tensor( [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Any = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): UpperCAmelCase : Dict = model( past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state UpperCAmelCase : List[str] = 
torch.Size((64, model.config.context_length, model.config.d_model) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Union[str, Any] = torch.tensor( [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(output[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=_SCREAMING_SNAKE_CASE ) ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : str = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Optional[Any] = prepare_batch("""val-batch.pt""" ) with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model.generate( static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , ) UpperCAmelCase : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) ) self.assertEqual(outputs.sequences.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : Tuple = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_SCREAMING_SNAKE_CASE ) UpperCAmelCase : Dict = outputs.sequences.mean(dim=1 ) self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _SCREAMING_SNAKE_CASE , rtol=1E-1 ) )
code_codestyle: 109
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name 
).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
style_context_codestyle: 343
label: 0
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class lowerCAmelCase_ ( __lowerCAmelCase ): __lowerCamelCase : int = ["vqvae"] def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) -> Optional[int]: super().__init__() self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , mel=lowerCamelCase_ , vqvae=lowerCamelCase_ ) def _snake_case ( self ) -> Tuple: return 50 if isinstance(self.scheduler , lowerCamelCase_ ) else 1000 @torch.no_grad() def __call__( self , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase=True , ) -> Any: _lowerCAmelCase = steps or self.get_default_steps() self.scheduler.set_timesteps(lowerCamelCase_ ) _lowerCAmelCase = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: _lowerCAmelCase = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: _lowerCAmelCase = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=lowerCamelCase_ , device=self.device , ) _lowerCAmelCase = noise _lowerCAmelCase = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(lowerCamelCase_ , lowerCamelCase_ ) _lowerCAmelCase = self.mel.audio_slice_to_image(lowerCamelCase_ ) _lowerCAmelCase = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape( (input_image.height, input_image.width) ) _lowerCAmelCase = (input_image / 255) * 2 - 1 _lowerCAmelCase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: _lowerCAmelCase = self.vqvae.encode(torch.unsqueeze(lowerCamelCase_ , 0 ) ).latent_dist.sample( generator=lowerCamelCase_ )[0] _lowerCAmelCase = self.vqvae.config.scaling_factor * input_images if start_step > 0: _lowerCAmelCase = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , self.scheduler.timesteps[start_step - 1] ) _lowerCAmelCase = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) _lowerCAmelCase = int(mask_start_secs * pixels_per_second ) _lowerCAmelCase = int(mask_end_secs * pixels_per_second ) _lowerCAmelCase = self.scheduler.add_noise(lowerCamelCase_ , lowerCamelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , lowerCamelCase_ ): _lowerCAmelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )["sample"] else: _lowerCAmelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ )["sample"] if isinstance(self.scheduler , lowerCamelCase_ ): _lowerCAmelCase = self.scheduler.step( model_output=lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , eta=lowerCamelCase_ , generator=lowerCamelCase_ , )["prev_sample"] else: _lowerCAmelCase = self.scheduler.step( 
model_output=lowerCamelCase_ , timestep=lowerCamelCase_ , sample=lowerCamelCase_ , generator=lowerCamelCase_ , )["prev_sample"] if mask is not None: if mask_start > 0: _lowerCAmelCase = mask[:, step, :, :mask_start] if mask_end > 0: _lowerCAmelCase = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance _lowerCAmelCase = 1 / self.vqvae.config.scaling_factor * images _lowerCAmelCase = self.vqvae.decode(lowerCamelCase_ )["sample"] _lowerCAmelCase = (images / 2 + 0.5).clamp(0 , 1 ) _lowerCAmelCase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() _lowerCAmelCase = (images * 255).round().astype("uint8" ) _lowerCAmelCase = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(lowerCamelCase_ , mode="RGB" ).convert("L" ) for _ in images) ) _lowerCAmelCase = [self.mel.image_to_audio(lowerCamelCase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(lowerCamelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCamelCase_ ) ) @torch.no_grad() def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = 50 ) -> Optional[Any]: assert isinstance(self.scheduler , lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ ) _lowerCAmelCase = np.array( [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] ) _lowerCAmelCase = (sample / 255) * 2 - 1 _lowerCAmelCase = torch.Tensor(lowerCamelCase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): _lowerCAmelCase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps _lowerCAmelCase = self.scheduler.alphas_cumprod[t] _lowerCAmelCase = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) _lowerCAmelCase = 1 - alpha_prod_t _lowerCAmelCase = self.unet(lowerCamelCase_ , lowerCamelCase_ )["sample"] _lowerCAmelCase = (1 - alpha_prod_t_prev) ** 0.5 * model_output _lowerCAmelCase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) _lowerCAmelCase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _snake_case ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = acos(torch.dot(torch.flatten(lowerCamelCase_ ) , torch.flatten(lowerCamelCase_ ) ) / torch.norm(lowerCamelCase_ ) / torch.norm(lowerCamelCase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(lowerCamelCase_ ) + sin(alpha * theta ) * xa / sin(lowerCamelCase_ )
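# The static helper at the end of the pipeline is spherical interpolation
# (slerp) between two noise tensors. Unlike linear interpolation, slerp keeps
# the result's norm close to that of Gaussian noise, which is why diffusion
# pipelines prefer it for latent interpolation. A standalone illustration
# (shapes and values assumed for this note, not taken from the pipeline):
import torch

x0, x1 = torch.randn(1000), torch.randn(1000)
theta = torch.acos(torch.dot(x0, x1) / (torch.norm(x0) * torch.norm(x1)))
alpha = 0.5
lerp = (1 - alpha) * x0 + alpha * x1
slerp = (torch.sin((1 - alpha) * theta) * x0 + torch.sin(alpha * theta) * x1) / torch.sin(theta)
print(torch.norm(lerp) / torch.norm(x0))   # noticeably below 1 for near-orthogonal noise
print(torch.norm(slerp) / torch.norm(x0))  # close to 1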
code_codestyle: 158
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
style_context_codestyle: 343
label: 0
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str=1_3 , lowerCAmelCase_ : Any=7 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Dict=9_9 , lowerCAmelCase_ : str=2_4 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : List[str]=6 , lowerCAmelCase_ : List[Any]=3_7 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Any=5_1_2 , lowerCAmelCase_ : List[Any]=1_6 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : int=0.02 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[Any]=1_0_0_0 , ): """simple docstring""" lowercase_ = parent lowercase_ = batch_size lowercase_ = seq_length lowercase_ = is_training lowercase_ = use_input_mask lowercase_ = use_token_type_ids lowercase_ = use_labels lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = num_labels lowercase_ = scope lowercase_ = range_bbox def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: lowercase_ = bbox[i, j, 3] lowercase_ = bbox[i, j, 1] lowercase_ = t if bbox[i, j, 2] < bbox[i, j, 0]: lowercase_ = bbox[i, j, 2] lowercase_ = bbox[i, j, 0] lowercase_ = t lowercase_ = None if self.use_input_mask: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) lowercase_ = None if self.use_token_type_ids: lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase_ = None lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase_ = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _UpperCAmelCase ( self : List[Any]): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , ): """simple docstring""" lowercase_ = LiltModel(config=lowerCamelCase_) model.to(lowerCamelCase_) model.eval() lowercase_ = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_) lowercase_ = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_) lowercase_ = model(lowerCamelCase_ , bbox=lowerCamelCase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , ): """simple docstring""" lowercase_ = self.num_labels lowercase_ = LiltForTokenClassification(config=lowerCamelCase_) model.to(lowerCamelCase_) model.eval() lowercase_ = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , ): """simple docstring""" lowercase_ = LiltForQuestionAnswering(config=lowerCamelCase_) model.to(lowerCamelCase_) model.eval() lowercase_ = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) = config_and_inputs lowercase_ = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): lowercase__ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowercase__ = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = False def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : 
Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict): """simple docstring""" return True def _UpperCAmelCase ( self : List[Any]): """simple docstring""" lowercase_ = LiltModelTester(self) lowercase_ = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7) def _UpperCAmelCase ( self : Any): """simple docstring""" self.config_tester.run_common_tests() def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_) def _UpperCAmelCase ( self : Dict): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ = type self.model_tester.create_and_check_model(*lowerCamelCase_) def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_) def _UpperCAmelCase ( self : Tuple): """simple docstring""" lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_) @slow def _UpperCAmelCase ( self : Optional[Any]): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = LiltModel.from_pretrained(lowerCamelCase_) self.assertIsNotNone(lowerCamelCase_) @require_torch @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""").to(lowerCamelCase_) lowercase_ = torch.tensor([[1, 2]] , device=lowerCamelCase_) lowercase_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_) # forward pass with torch.no_grad(): lowercase_ = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_) lowercase_ = torch.Size([1, 2, 7_6_8]) lowercase_ = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3))
code_codestyle: 136
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
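# Running just the fast tests above; a sketch, assuming this file lives at the
# usual diffusers test path (the path is an assumption):
#
#   python -m pytest tests/pipelines/shap_e/test_shap_e.py::ShapEPipelineFastTests -v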
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList a = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif'''] class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : int=1 ): _A = tokenizer _A = dataset _A = len(lowerCamelCase_ ) if n_tasks is None else n_tasks _A = n_copies def __iter__( self : Dict ): _A = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) _A = self.tokenizer(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any ): _A = start_length _A = eof_strings _A = tokenizer def __call__( self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ): _A = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) _A = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(lowerCamelCase_ ) def _snake_case ( _snake_case : str ) -> List[str]: '''simple docstring''' _A = re.split('(%s)' % '|'.join(UpperCamelCase_ ) , UpperCamelCase_ ) # last string should be "" return "".join(string_list[:-2] ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : int , _snake_case : Union[str, Any] , _snake_case : Optional[int]=20 , **_snake_case : List[str] ) -> List[str]: '''simple docstring''' _A = defaultdict(UpperCamelCase_ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(UpperCamelCase_ ) ): with torch.no_grad(): _A = batch['ids'].shape[-1] _A = accelerator.unwrap_model(UpperCamelCase_ ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=UpperCamelCase_ , **UpperCamelCase_ ) # each task is generated batch_size times _A = batch['task_id'].repeat(UpperCamelCase_ ) _A = accelerator.pad_across_processes( UpperCamelCase_ , dim=1 , pad_index=tokenizer.pad_token_id ) _A , _A = accelerator.gather((generated_tokens, generated_tasks) ) _A = generated_tokens.cpu().numpy() _A = generated_tasks.cpu().numpy() for task, generated_tokens in zip(UpperCamelCase_ , UpperCamelCase_ ): gen_token_dict[task].append(UpperCamelCase_ ) _A = [[] for _ in range(UpperCamelCase_ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: _A = tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) 
code_gens[task].append(remove_last_block(UpperCamelCase_ ) ) return code_gens def _snake_case ( ) -> str: '''simple docstring''' _A = HfArgumentParser(UpperCamelCase_ ) _A = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric _A = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing _A = 'false' if args.num_workers is None: _A = multiprocessing.cpu_count() # Use dataset load to feed to accelerate _A = Accelerator() set_seed(args.seed , device_specific=UpperCamelCase_ ) # Load model and tokenizer _A = AutoTokenizer.from_pretrained(args.model_ckpt ) _A = tokenizer.eos_token _A = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings _A = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCamelCase_ , UpperCamelCase_ )] ), } # Load evaluation dataset and metric _A = load_dataset('openai_humaneval' ) _A = load_metric('code_eval' ) _A = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) _A = args.n_samples // args.batch_size _A = TokenizedDataset(UpperCamelCase_ , human_eval['test'] , n_copies=UpperCamelCase_ , n_tasks=UpperCamelCase_ ) # do not confuse args.batch_size, which is actually the num_return_sequences _A = DataLoader(UpperCamelCase_ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: _A = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`' ' flag to enable code evaluation.' ) raise exception _A , _A = accelerator.prepare(UpperCamelCase_ , UpperCamelCase_ ) _A = complete_code( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , n_tasks=UpperCamelCase_ , batch_size=args.batch_size , **UpperCamelCase_ , ) if accelerator.is_main_process: _A = [] for task in tqdm(range(UpperCamelCase_ ) ): _A = human_eval['test'][task]['test'] _A = F'''check({human_eval["test"][task]["entry_point"]})''' references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric _A , _A = code_eval_metric.compute( references=UpperCamelCase_ , predictions=UpperCamelCase_ , num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
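# Example invocation; a sketch only -- the script filename and checkpoint are
# assumptions, but every flag maps to a field the script reads above
# (model_ckpt, do_sample, n_samples, batch_size, num_tasks, output_file,
# HF_ALLOW_CODE_EVAL):
#
#   accelerate launch human_eval.py \
#       --model_ckpt codeparrot/codeparrot \
#       --do_sample True --n_samples 20 --batch_size 10 \
#       --num_tasks 10 --output_file eval_results.json \
#       --HF_ALLOW_CODE_EVAL "1"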
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the two sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using bottom-up (iterative) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
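# A quick sanity check of the functions above; a minimal sketch that can live
# in the same module (the helper name is hypothetical):
def _demo_iter_merge_sort() -> None:
    assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]
    assert iter_merge_sort([1]) == [1]  # a single element is already sorted
    assert iter_merge_sort([]) == []  # empty input passes through unchanged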
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
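# The JIT pattern exercised by test_jit_compilation above, in plain form; a
# sketch assuming jax/flax are installed and the public ViT checkpoint is
# reachable:
#
#   import jax
#   import numpy as np
#   from transformers import FlaxViTModel
#
#   model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
#   forward = jax.jit(lambda pixel_values: model(pixel_values=pixel_values))
#   outputs = forward(np.ones((1, 3, 224, 224)))  # compiled on first call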
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available SCREAMING_SNAKE_CASE : Union[str, Any] = { """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""], """tokenization_xlm""": ["""XLMTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : str = [ """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMForMultipleChoice""", """XLMForQuestionAnswering""", """XLMForQuestionAnsweringSimple""", """XLMForSequenceClassification""", """XLMForTokenClassification""", """XLMModel""", """XLMPreTrainedModel""", """XLMWithLMHeadModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Optional[Any] = [ """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMForMultipleChoice""", """TFXLMForQuestionAnsweringSimple""", """TFXLMForSequenceClassification""", """TFXLMForTokenClassification""", """TFXLMMainLayer""", """TFXLMModel""", """TFXLMPreTrainedModel""", """TFXLMWithLMHeadModel""", ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFResNetForImageClassification, TFResNetModel
    from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class TFResNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ResNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFResNetModel(config=config)
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFResNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFResNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ResNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ResNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
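# The integration test above in plain form; a sketch assuming TF and the vision
# extras are installed (the checkpoint name and image path are assumptions):
#
#   from PIL import Image
#   from transformers import AutoImageProcessor, TFResNetForImageClassification
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
#   logits = model(**inputs).logits  # shape (1, 1000)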
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sola import PokerHand

SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Pick two random hands from SORTED_HANDS and derive the expected comparison result."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
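# The tests above are plain pytest functions; a sketch of running them,
# assuming this file is saved as test_poker_hand.py next to sola.py and
# poker_hands.txt (the test filename is an assumption):
#
#   python -m pytest test_poker_hand.py -v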
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)


def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the original checkpoint's weights into the HF Conditional DETR structure."""
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="conditional_detr_resnet50",
        type=str,
        help="Name of the CONDITIONAL_DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
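The converter can also be driven outside of argparse; a hedged example follows (the checkpoint name is one of the torch-hub models the script targets, the output path is illustrative, and note the function body above also pushes the converted weights to the Hub):

```python
# Illustrative only: needs network access for torch.hub and the HF Hub,
# and the push_to_hub call inside the function requires write credentials.
convert_conditional_detr_checkpoint(
    model_name="conditional_detr_resnet50",
    pytorch_dump_folder_path="./conditional_detr_resnet50",
)
```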
343
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Optional[int]= logging.get_logger(__name__) _a : Optional[Any]= { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class UpperCamelCase ( __lowerCAmelCase ): UpperCAmelCase : List[Any] = """camembert""" def __init__(self : Dict , _A : Tuple=3_05_22 , _A : Any=7_68 , _A : List[Any]=12 , _A : Optional[int]=12 , _A : int=30_72 , _A : List[Any]="gelu" , _A : Any=0.1 , _A : List[Any]=0.1 , _A : List[Any]=5_12 , _A : List[Any]=2 , _A : Any=0.02 , _A : Tuple=1E-12 , _A : Optional[Any]=1 , _A : str=0 , _A : Union[str, Any]=2 , _A : Any="absolute" , _A : Dict=True , _A : List[Any]=None , **_A : Any , ) -> Any: super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_) __snake_case : int = vocab_size __snake_case : Optional[int] = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : Any = hidden_act __snake_case : List[str] = intermediate_size __snake_case : int = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : Optional[int] = type_vocab_size __snake_case : Dict = initializer_range __snake_case : int = layer_norm_eps __snake_case : Union[str, Any] = position_embedding_type __snake_case : Union[str, Any] = use_cache __snake_case : str = classifier_dropout class UpperCamelCase ( __lowerCAmelCase ): @property def _lowercase (self : List[str]) -> Any: if self.task == "multiple-choice": __snake_case : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __snake_case : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ])
172
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = mask_ratio UpperCamelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , 
lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches UpperCamelCase = (self.image_size // self.patch_size) ** 2 UpperCamelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) UpperCamelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self : str ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = outputs_dict[0].numpy() UpperCamelCase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ): UpperCamelCase = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): UpperCamelCase = v.numpy() else: UpperCamelCase = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ ) } UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCamelCase = main_layer_class(lowerCamelCase_ ) UpperCamelCase = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) UpperCamelCase = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" ) model.save(lowerCamelCase_ ) UpperCamelCase = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) UpperCamelCase = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = outputs.last_hidden_state.numpy() UpperCamelCase = 0 else: UpperCamelCase = outputs.logits.numpy() UpperCamelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = after_outputs["""last_hidden_state"""].numpy() UpperCamelCase = 0 else: UpperCamelCase = after_outputs["""logits"""].numpy() UpperCamelCase = 0 UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) UpperCamelCase = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCamelCase = model_class.from_config(model.config ) UpperCamelCase = 
new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> int: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase = ViTMAEConfig() UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
343
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
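The `_LazyModule` indirection above is what keeps the package import cheap: submodules are only loaded when one of their exported names is first touched. A minimal standalone sketch of the same pattern, assuming nothing about the real `_LazyModule` implementation:

```python
import importlib
import types


class LazyModule(types.ModuleType):
    """Load submodules on first attribute access instead of at import time."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute -> the submodule that defines it
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value
```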
3
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
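A short usage sketch for the backtracking colorer above; the adjacency matrix and the color budget are illustrative, not from the original module:

```python
# 5-vertex graph as an adjacency matrix (1 = edge).
graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(graph, 3))  # [0, 1, 0, 1, 0] -- a valid 3-coloring
print(color([[0, 1], [1, 0]], 1))  # [] -- two adjacent vertices can't share one color
```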
343
0
def get_highest_set_bit_position(number: int) -> int:
    """
    Returns the 1-indexed position of the highest set bit of `number`
    (equivalent to int.bit_length), or 0 when no bit is set.
    """
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")

    position = 0
    while number:
        position += 1
        number >>= 1

    return position


if __name__ == "__main__":
    import doctest

    doctest.testmod()
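A few illustrative checks of the helper above (the values are examples, not from the original file):

```python
assert get_highest_set_bit_position(0) == 0
assert get_highest_set_bit_position(1) == 1  # 0b1
assert get_highest_set_bit_position(8) == 4  # 0b1000 -> highest set bit is bit 4, 1-indexed
```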
318
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ 
, pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
343
0
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # each slot chains its values in a deque, newest first
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
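A hypothetical usage sketch; it assumes the sibling `HashTable` base class exposes a `(size_table, charge_factor=...)` constructor and an `insert_data` method, neither of which is shown here:

```python
# Assumed API of the HashTable base class -- adjust to the real signature.
table = HashTableWithLinkedList(3, charge_factor=2)
for value in (10, 20, 30, 40):
    table.insert_data(value)  # chained slots each hold a deque of values
print(table.values)
```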
6
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _SCREAMING_SNAKE_CASE = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", 
False), ("""AS 3S 4S 8S 2S""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]), ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 2_3), ("""JH 9H TH KH QH""", 2_2), ("""JC KH JS JD JH""", 2_1), ("""KH KC 3S 3H 3D""", 2_0), ("""8C 9C 5C 3C TC""", 1_9), ("""JS QS 9H TS KH""", 1_8), ("""7C 7S KH 2H 7H""", 1_7), ("""3C KH 5D 5S KH""", 1_6), ("""QH 8H KD JH 8S""", 1_5), ("""2D 6D 9D TH 7D""", 1_4), ) def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase( UpperCamelCase_ = 100 ) -> List[Any]: '''simple docstring''' return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: '''simple docstring''' UpperCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) UpperCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand 
== poker_hands[index] def lowercase( ) -> Union[str, Any]: '''simple docstring''' # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase( ) -> str: '''simple docstring''' # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase( ) -> int: '''simple docstring''' # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) UpperCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 376
343
0
def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack: only needed subproblems are solved."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            "Both the weights and values vectors must be either lists or tuples"
        )

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
63
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
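A small illustration of the `d_model`/`n_head` guard and the derived `d_head` attribute above (values chosen for the example):

```python
config = XLNetConfig(d_model=1024, n_head=16)
assert config.d_head == 64  # d_model // n_head

try:
    XLNetConfig(d_model=1000, n_head=16)  # 1000 % 16 != 0
except ValueError as err:
    print(err)  # 'd_model % n_head' (8) should be equal to 0
```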
343
0
'''simple docstring''' import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=3 , _lowerCAmelCase=32 , _lowerCAmelCase=3 , _lowerCAmelCase=10 , _lowerCAmelCase=[8, 16, 32, 64] , _lowerCAmelCase=[1, 1, 2, 1] , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase="relu" , _lowerCAmelCase=3 , _lowerCAmelCase=None , _lowerCAmelCase=["stage2", "stage3", "stage4"] , _lowerCAmelCase=[2, 3, 4] , _lowerCAmelCase=1 , ) -> int: _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = image_size _lowerCAmelCase = num_channels _lowerCAmelCase = embeddings_size _lowerCAmelCase = hidden_sizes _lowerCAmelCase = depths _lowerCAmelCase = is_training _lowerCAmelCase = use_labels _lowerCAmelCase = hidden_act _lowerCAmelCase = num_labels _lowerCAmelCase = scope _lowerCAmelCase = len(lowerCamelCase_ ) _lowerCAmelCase = out_features _lowerCAmelCase = out_indices _lowerCAmelCase = num_groups def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels ) _lowerCAmelCase = self.get_config() return config, pixel_values, labels def _snake_case ( self ) -> Dict: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: _lowerCAmelCase = BitModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _lowerCAmelCase = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = self.num_labels _lowerCAmelCase = BitForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _lowerCAmelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]: _lowerCAmelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _lowerCAmelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # 
verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None _lowerCAmelCase = None _lowerCAmelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() _lowerCAmelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( __lowerCAmelCase ,__lowerCAmelCase ,unittest.TestCase ): __lowerCamelCase : List[str] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCamelCase : Optional[int] = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) __lowerCamelCase : Tuple = False __lowerCamelCase : List[str] = False __lowerCamelCase : Optional[int] = False __lowerCamelCase : List[str] = False __lowerCamelCase : str = False def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = BitModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def _snake_case ( self ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ) -> Dict: return @unittest.skip(reason="Bit does not output attentions" ) def _snake_case ( self ) -> List[Any]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def _snake_case ( self ) -> List[str]: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def _snake_case ( self ) -> Optional[int]: pass def _snake_case ( self ) -> str: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(lowerCamelCase_ ) _lowerCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase = [*signature.parameters.keys()] _lowerCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def _snake_case ( self ) -> Tuple: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def _snake_case ( self ) -> int: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def _snake_case ( self ) -> Any: _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCAmelCase = model_class(config=lowerCamelCase_ ) for name, module in model.named_modules(): if 
isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def _snake_case ( self ) -> Tuple: def check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): _lowerCAmelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): _lowerCAmelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) _lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _lowerCAmelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: _lowerCAmelCase = layer_type _lowerCAmelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def _snake_case ( self ) -> str: pass def _snake_case ( self ) -> List[Any]: _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def _snake_case ( self ) -> str: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase = BitModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def __a(): '''simple docstring''' _lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): @cached_property def _snake_case ( self ) -> Tuple: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _snake_case ( self ) -> List[str]: _lowerCAmelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ ) _lowerCAmelCase = self.default_image_processor _lowerCAmelCase = prepare_img() _lowerCAmelCase = image_processor(images=lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): _lowerCAmelCase = model(**lowerCamelCase_ ) # verify the logits _lowerCAmelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) _lowerCAmelCase = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @require_torch class lowerCAmelCase_ ( __lowerCAmelCase ,unittest.TestCase ): __lowerCamelCase : str = (BitBackbone,) if is_torch_available() else () __lowerCamelCase : Optional[int] = BitConfig __lowerCamelCase : int = False def _snake_case ( self ) -> str: _lowerCAmelCase = BitModelTester(self )
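# Hedged usage sketch (not part of the test file above): it exercises the same
# BitBackbone out_features behavior the backbone test checks. Assumes a
# transformers install with Bit support; the stage names are illustrative.
from transformers import BitBackbone, BitConfig

config = BitConfig(out_features=["stage2", "stage3", "stage4"])
backbone = BitBackbone(config)  # randomly initialized, no download needed
print(backbone.channels)  # channel counts for the selected stages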
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
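# Minimal sketch of the register pattern the tests above rely on. MyConfig and
# MyFeatureExtractor are hypothetical stand-ins for user classes, not part of
# the library; AutoConfig.register requires the config's model_type to match.
from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-custom"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


AutoConfig.register("my-custom", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)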
"""simple docstring""" import argparse import struct import unittest class SCREAMING_SNAKE_CASE__ : def __init__( self : Any , lowerCAmelCase_ : bytes): """simple docstring""" lowercase_ = data # Initialize hash values lowercase_ = [ 0x6A_09E_667, 0xBB_67A_E85, 0x3C_6EF_372, 0xA5_4FF_53A, 0x51_0E5_27F, 0x9B_056_88C, 0x1F_83D_9AB, 0x5B_E0C_D19, ] # Initialize round constants lowercase_ = [ 0x42_8A2_F98, 0x71_374_491, 0xB5_C0F_BCF, 0xE9_B5D_BA5, 0x39_56C_25B, 0x59_F11_1F1, 0x92_3F8_2A4, 0xAB_1C5_ED5, 0xD8_07A_A98, 0x12_835_B01, 0x24_318_5BE, 0x55_0C7_DC3, 0x72_BE5_D74, 0x80_DEB_1FE, 0x9B_DC0_6A7, 0xC1_9BF_174, 0xE4_9B6_9C1, 0xEF_BE4_786, 0x0F_C19_DC6, 0x24_0CA_1CC, 0x2D_E92_C6F, 0x4A_748_4AA, 0x5C_B0A_9DC, 0x76_F98_8DA, 0x98_3E5_152, 0xA8_31C_66D, 0xB0_032_7C8, 0xBF_597_FC7, 0xC6_E00_BF3, 0xD5_A79_147, 0x06_CA6_351, 0x14_292_967, 0x27_B70_A85, 0x2E_1B2_138, 0x4D_2C6_DFC, 0x53_380_D13, 0x65_0A7_354, 0x76_6A0_ABB, 0x81_C2C_92E, 0x92_722_C85, 0xA2_BFE_8A1, 0xA8_1A6_64B, 0xC2_4B8_B70, 0xC7_6C5_1A3, 0xD1_92E_819, 0xD6_990_624, 0xF4_0E3_585, 0x10_6AA_070, 0x19_A4C_116, 0x1E_376_C08, 0x27_487_74C, 0x34_B0B_CB5, 0x39_1C0_CB3, 0x4E_D8A_A4A, 0x5B_9CC_A4F, 0x68_2E6_FF3, 0x74_8F8_2EE, 0x78_A56_36F, 0x84_C87_814, 0x8C_C70_208, 0x90_BEF_FFA, 0xA4_506_CEB, 0xBE_F9A_3F7, 0xC6_717_8F2, ] lowercase_ = self.preprocessing(self.data) self.final_hash() @staticmethod def _UpperCAmelCase ( lowerCAmelCase_ : bytes): """simple docstring""" lowercase_ = b"""\x80""" + (b"""\x00""" * (6_3 - (len(lowerCamelCase_) + 8) % 6_4)) lowercase_ = struct.pack(""">Q""" , (len(lowerCamelCase_) * 8)) return data + padding + big_endian_integer def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" lowercase_ = [ self.preprocessed_data[x : x + 6_4] for x in range(0 , len(self.preprocessed_data) , 6_4) ] for block in self.blocks: # Convert the given block into a list of 4 byte integers lowercase_ = list(struct.unpack(""">16L""" , lowerCamelCase_)) # add 48 0-ed integers words += [0] * 4_8 lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = self.hashes for index in range(0 , 6_4): if index > 1_5: # modify the zero-ed indexes at the end of the array lowercase_ = ( self.ror(words[index - 1_5] , 7) ^ self.ror(words[index - 1_5] , 1_8) ^ (words[index - 1_5] >> 3) ) lowercase_ = ( self.ror(words[index - 2] , 1_7) ^ self.ror(words[index - 2] , 1_9) ^ (words[index - 2] >> 1_0) ) lowercase_ = ( words[index - 1_6] + sa + words[index - 7] + sa ) % 0x100_000_000 # Compression lowercase_ = self.ror(lowerCamelCase_ , 6) ^ self.ror(lowerCamelCase_ , 1_1) ^ self.ror(lowerCamelCase_ , 2_5) lowercase_ = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g) lowercase_ = ( h + sa + ch + self.round_constants[index] + words[index] ) % 0x100_000_000 lowercase_ = self.ror(lowerCamelCase_ , 2) ^ self.ror(lowerCamelCase_ , 1_3) ^ self.ror(lowerCamelCase_ , 2_2) lowercase_ = (a & b) ^ (a & c) ^ (b & c) lowercase_ = (sa + maj) % 0x100_000_000 lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = ( g, f, e, ((d + tempa) % 0x100_000_000), c, b, a, ((tempa + tempa) % 0x100_000_000), ) lowercase_ = [a, b, c, d, e, f, g, h] # Modify final values lowercase_ = [ ((element + mutated_hash_values[index]) % 0x100_000_000) for index, element in enumerate(self.hashes) ] lowercase_ = """""".join([hex(lowerCamelCase_)[2:].zfill(8) for value in self.hashes]) def _UpperCAmelCase ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : int): """simple docstring""" return 0xFF_FFF_FFF & 
(value << (3_2 - rotations)) | (value >> rotations) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def _UpperCAmelCase ( self : Dict): """simple docstring""" import hashlib lowercase_ = bytes("""Test String""" , """utf-8""") self.assertEqual(SHAaaa(lowerCamelCase_).hash , hashlib.shaaaa(lowerCamelCase_).hexdigest()) def _SCREAMING_SNAKE_CASE () -> None: '''simple docstring''' import doctest doctest.testmod() lowercase_ = argparse.ArgumentParser() parser.add_argument( """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument( """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) lowercase_ = parser.parse_args() lowercase_ = args.input_string # hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: lowercase_ = f.read() else: lowercase_ = bytes(UpperCamelCase_ , """utf-8""" ) print(SHAaaa(UpperCamelCase_ ).hash ) if __name__ == "__main__": main()
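# Quick sanity check for the class above: the pure-Python digest should agree
# with the stdlib reference implementation on an arbitrary message.
import hashlib

message = b"abc"
assert SHA256(message).hash == hashlib.sha256(message).hexdigest()
print(SHA256(message).hash)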
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4E00 and cp <= 0X9FFF) or (cp >= 0X3400 and cp <= 0X4DBF) # or (cp >= 0X2_0000 and cp <= 0X2_A6DF) # or (cp >= 0X2_A700 and cp <= 0X2_B73F) # or (cp >= 0X2_B740 and cp <= 0X2_B81F) # or (cp >= 0X2_B820 and cp <= 0X2_CEAF) # or (cp >= 0XF900 and cp <= 0XFAFF) or (cp >= 0X2_F800 and cp <= 0X2_FA1F) # ): # return True return False def lowercase( UpperCamelCase_ ) -> Dict: '''simple docstring''' # word like '180' or '身高' or '神' for char in word: UpperCamelCase = ord(UpperCamelCase_ ) if not _is_chinese_char(UpperCamelCase_ ): return 0 return 1 def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' UpperCamelCase = set() for token in tokens: UpperCamelCase = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ ) if chinese_word: word_set.add(UpperCamelCase_ ) UpperCamelCase = list(UpperCamelCase_ ) return word_list def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' if not chinese_word_set: return bert_tokens UpperCamelCase = max([len(UpperCamelCase_ ) for w in chinese_word_set] ) UpperCamelCase = bert_tokens UpperCamelCase , UpperCamelCase = 0, len(UpperCamelCase_ ) while start < end: UpperCamelCase = True if is_chinese(bert_word[start] ): UpperCamelCase = min(end - start , UpperCamelCase_ ) for i in range(UpperCamelCase_ , 1 , -1 ): UpperCamelCase = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): UpperCamelCase = """##""" + bert_word[j] UpperCamelCase = start + i UpperCamelCase = False break if single_word: start += 1 return bert_word def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str: '''simple docstring''' UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0] UpperCamelCase = [get_chinese_word(UpperCamelCase_ ) for r in res] ltp_res.extend(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for i in range(0 , len(UpperCamelCase_ ) , 100 ): UpperCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 ) bert_res.extend(res["""input_ids"""] ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) UpperCamelCase = [] for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ): UpperCamelCase = [] for id in input_ids: UpperCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase_ ) input_tokens.append(UpperCamelCase_ ) UpperCamelCase = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(UpperCamelCase_ ): if token[:2] == "##": UpperCamelCase = token[2:] # save chinese tokens' pos if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ): ref_id.append(UpperCamelCase_ ) ref_ids.append(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) return ref_ids def lowercase( UpperCamelCase_ ) -> List[Any]: '''simple docstring''' # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: UpperCamelCase = f.readlines() UpperCamelCase = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' UpperCamelCase = LTP(args.ltp ) # faster in GPU device UpperCamelCase = BertTokenizer.from_pretrained(args.bert ) UpperCamelCase = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: UpperCamelCase = [json.dumps(UpperCamelCase_ ) + """\n""" for ref in ref_ids] f.writelines(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""" ) parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""") parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""") _SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
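# Hypothetical invocation of the script above. The flag names and default paths
# come from its argparse setup; the script filename itself is a placeholder:
#
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt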
"""simple docstring""" import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): debug_launcher(test_script.main ) def lowerCAmelCase_ ( self : str ): debug_launcher(test_ops.main )
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 1 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase = (self.patch_size, self.patch_size) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ ) 
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) UpperCamelCase = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase_ )
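# Hedged sketch of the slow-path check above, runnable outside the test harness.
# Assumes jax/flax and transformers are installed; the checkpoint name and the
# dummy-input shape are the ones the test itself uses.
import numpy as np
from transformers import FlaxViTModel

model = FlaxViTModel.from_pretrained("google/vit-base-patch16-224")
outputs = model(np.ones((1, 3, 224, 224)))
print(outputs.last_hidden_state.shape)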
def selection_sort(collection: list) -> list:
    """Sort a list in place with selection sort and return it."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            # swap the smallest remaining element into position i
            collection[i], collection[least] = collection[least], collection[i]
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
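# Tiny check of the function above; selection sort always performs O(n^2)
# comparisons regardless of the input order.
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []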
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = range_bbox def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase = bbox[i, j, 3] UpperCamelCase = bbox[i, j, 1] UpperCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase = bbox[i, j, 2] UpperCamelCase = bbox[i, j, 0] UpperCamelCase = t UpperCamelCase = None if self.use_input_mask: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, 
"""zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): """simple docstring""" return True def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = LiltModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ ) UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ ) UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ ) UpperCamelCase = torch.Size([1, 2, 768] ) UpperCamelCase = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def lowercase ( _snake_case : Any , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) ->Dict: """simple docstring""" __snake_case : Union[str, Any] = 0 if start < end: __snake_case : Dict = randint(UpperCamelCase_ , UpperCamelCase_ ) __snake_case : Tuple = a[end] __snake_case : List[Any] = a[pivot] __snake_case : str = temp __snake_case , __snake_case : Union[str, Any] = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 ) count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ ) return count def lowercase ( _snake_case : Union[str, Any] , _snake_case : Tuple , _snake_case : str ) ->str: """simple docstring""" __snake_case : str = 0 __snake_case : List[Any] = randint(UpperCamelCase_ , UpperCamelCase_ ) __snake_case : Dict = a[end] __snake_case : Optional[Any] = a[pivot] __snake_case : List[str] = temp __snake_case : Dict = start - 1 for index in range(UpperCamelCase_ , UpperCamelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value __snake_case : Optional[int] = new_pivot_index + 1 __snake_case : Dict = a[new_pivot_index] __snake_case : List[str] = a[index] __snake_case : Tuple = temp __snake_case : str = a[new_pivot_index + 1] __snake_case : Optional[Any] = a[end] __snake_case : Dict = temp return new_pivot_index + 1, count SCREAMING_SNAKE_CASE : int = TemporaryFile() SCREAMING_SNAKE_CASE : Union[str, Any] = 100 # 1000 elements are to be sorted SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = 0, 1 # mean and standard deviation SCREAMING_SNAKE_CASE : List[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print("""The array is""") print(X) outfile.seek(0) # using the same array SCREAMING_SNAKE_CASE : Optional[int] = np.load(outfile) SCREAMING_SNAKE_CASE : int = len(M) - 1 SCREAMING_SNAKE_CASE : int = _in_place_quick_sort(M, 0, r) print( """No of Comparisons for 100 elements selected from a standard normal distribution""" """is :""" ) print(z)
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class _lowerCamelCase( unittest.TestCase ): def UpperCamelCase ( self) -> Dict: """simple docstring""" _lowercase : Optional[Any] = 'hf-internal-testing/tiny-random-t5' _lowercase : Optional[int] = AutoTokenizer.from_pretrained(lowerCamelCase_) _lowercase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_) _lowercase : Any = tokenizer('This is me', return_tensors='pt') _lowercase : str = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules())) _lowercase : str = model.generate(**lowerCamelCase_) _lowercase : Optional[int] = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_) _lowercase : str = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())) _lowercase : int = model_reloaded.generate(**lowerCamelCase_) self.assertTrue(torch.allclose(lowerCamelCase_, lowerCamelCase_)) def UpperCamelCase ( self) -> List[Any]: """simple docstring""" _lowercase : int = 'hf-internal-testing/tiny-random-t5' _lowercase : Any = AutoModelForSeqaSeqLM.from_pretrained(lowerCamelCase_) _lowercase : str = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(lowerCamelCase_): model.save_pretrained(lowerCamelCase_) _lowercase : Optional[int] = model.reverse_bettertransformer() model.save_pretrained(lowerCamelCase_)
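# Hedged sketch of the round-trip the test above exercises. Requires `optimum`
# to be installed; the tiny checkpoint is the one the test uses.
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()  # swap attention for BetterTransformer kernels
model = model.reverse_bettertransformer()  # restore original modules before saving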
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a moving body."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
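# Worked example: a 10 kg mass moving at 10 m/s carries 0.5 * 10 * 10**2 = 500 J.
assert kinetic_energy(10, 10) == 500.0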
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _a : Dict= logging.get_logger(__name__) _a : Tuple= {"vocab_file": "sentencepiece.bpe.model"} _a : Optional[Any]= { "vocab_file": { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model", } } _a : List[Any]= { "camembert-base": 512, } _a : Optional[int]= "▁" class UpperCamelCase ( __lowerCAmelCase ): UpperCAmelCase : List[Any] = VOCAB_FILES_NAMES UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : int = ["""input_ids""", """attention_mask"""] def __init__(self : List[str] , _A : int , _A : Dict="<s>" , _A : List[str]="</s>" , _A : str="</s>" , _A : List[Any]="<s>" , _A : Optional[Any]="<unk>" , _A : Dict="<pad>" , _A : Optional[Any]="<mask>" , _A : Dict=["<s>NOTUSED", "</s>NOTUSED"] , _A : Optional[Dict[str, Any]] = None , **_A : str , ) -> Optional[int]: __snake_case : Optional[int] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_) if isinstance(lowerCamelCase_ , lowerCamelCase_) else mask_token __snake_case : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) __snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(lowerCamelCase_)) __snake_case : Union[str, Any] = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> __snake_case : List[Any] = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3} __snake_case : List[Any] = len(self.fairseq_tokens_to_ids) __snake_case : str = len(self.sp_model) + len(self.fairseq_tokens_to_ids) __snake_case : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def _lowercase (self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None) -> Union[str, Any]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] __snake_case : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False) -> Optional[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_)) + [1] return [1] + ([0] * len(lowerCamelCase_)) + [1, 1] + ([0] * len(lowerCamelCase_)) + [1] def _lowercase (self : str , _A : List[int] , _A : Optional[List[int]] = None) -> Optional[Any]: __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def _lowercase (self : Any) -> str: return len(self.fairseq_tokens_to_ids) + 
len(self.sp_model) def _lowercase (self : Dict) -> List[Any]: __snake_case : Any = {self.convert_ids_to_tokens(lowerCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _lowercase (self : Optional[int] , _A : str) -> Union[str, Any]: return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_) def _lowercase (self : Optional[int] , _A : Optional[Any]) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(lowerCamelCase_) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(lowerCamelCase_) def _lowercase (self : Dict , _A : Union[str, Any]) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def _lowercase (self : Union[str, Any] , _A : Union[str, Any]) -> int: __snake_case : Dict = [] __snake_case : Tuple = '' __snake_case : Any = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_) + token __snake_case : Optional[Any] = True __snake_case : Tuple = [] else: current_sub_tokens.append(lowerCamelCase_) __snake_case : List[str] = False out_string += self.sp_model.decode(lowerCamelCase_) return out_string.strip() def __getstate__(self : Optional[Any]) -> Optional[Any]: __snake_case : Optional[Any] = self.__dict__.copy() __snake_case : Union[str, Any] = None return state def __setstate__(self : Optional[Any] , _A : str) -> Dict: __snake_case : Optional[Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): __snake_case : Any = {} __snake_case : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _lowercase (self : Union[str, Any] , _A : str , _A : Optional[str] = None) -> Tuple: if not os.path.isdir(lowerCamelCase_): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return __snake_case : int = os.path.join( lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowerCamelCase_) elif not os.path.isfile(self.vocab_file): with open(lowerCamelCase_ , 'wb') as fi: __snake_case : Tuple = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_) return (out_vocab_file,)
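# Hedged usage sketch for the tokenizer above, via its upstream name
# CamembertTokenizer and the checkpoint listed in PRETRAINED_VOCAB_FILES_MAP;
# requires `sentencepiece` and network access.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
print(tokenizer.tokenize("J'aime le camembert !"))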
'''simple docstring'''

import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch

if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )

def lowerCAmelCase_ ( snake_case__ , snake_case__=10 ):
    '''simple docstring'''
    A : Union[str, Any] = []
    for _ in range(UpperCamelCase_ ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs

def lowerCAmelCase_ ( snake_case__ , snake_case__=10 ):
    '''simple docstring'''
    A : Union[str, Any] = []
    for step in range(UpperCamelCase_ ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                A : Union[str, Any] = os.path.join(UpperCamelCase_ , '''schedule.bin''' )
                torch.save(scheduler.state_dict() , UpperCamelCase_ )
                A : int = torch.load(UpperCamelCase_ )
                scheduler.load_state_dict(UpperCamelCase_ )
    return lrs

@require_torch
class A ( unittest.TestCase ):
    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
        for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ )
        A : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
        A : Dict = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A : str = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A : Tuple = criterion(lowerCamelCase_ , lowerCamelCase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : List[str] = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCamelCase_ )
        A : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] )
        A : Optional[Any] = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A : Dict = Adafactor(
            params=[w] ,
            lr=1e-2 ,
            eps=(1e-30, 1e-3) ,
            clip_threshold=1.0 ,
            decay_rate=-0.8 ,
            betaa=lowerCamelCase_ ,
            weight_decay=0.0 ,
            relative_step=lowerCamelCase_ ,
            scale_parameter=lowerCamelCase_ ,
            warmup_init=lowerCamelCase_ ,
        )
        for _ in range(1000 ):
            A : List[Any] = criterion(lowerCamelCase_ , lowerCamelCase_ )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )

@require_torch
class A ( unittest.TestCase ):
    __magic_name__ = nn.Linear(50 , 50 ) if is_torch_available() else None
    __magic_name__ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    __magic_name__ = 10

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Tuple:
        """simple docstring"""
        self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) )
        for a, b in zip(lowerCamelCase_ , lowerCamelCase_ ):
            self.assertAlmostEqual(lowerCamelCase_ , lowerCamelCase_ , delta=lowerCamelCase_ , msg=lowerCamelCase_ )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : Union[str, Any] = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        A : List[Any] = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {'''num_warmup_steps''': 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, '''num_cycles''': 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {'''num_warmup_steps''': 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            A, A = data
            A : List[Any] = scheduler_func(self.optimizer , **lowerCamelCase_ )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            A : Optional[Any] = unwrap_schedule(lowerCamelCase_ , self.num_steps )
            self.assertListAlmostEqual(
                lowerCamelCase_ ,
                lowerCamelCase_ ,
                tol=1e-2 ,
                msg=F'failed for {scheduler_func} in normal scheduler' ,
            )
            A : Any = scheduler_func(self.optimizer , **lowerCamelCase_ )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(lowerCamelCase_ )  # wrap to test picklability of the schedule
            A : int = unwrap_and_save_reload_schedule(lowerCamelCase_ , self.num_steps )
            self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ , msg=F'failed for {scheduler_func} in save and reload' )

class A :
    def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        A : List[str] = fn

    def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        return self.fn(*lowerCamelCase_ , **lowerCamelCase_ )

    @classmethod
    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """simple docstring"""
        A : Any = list(map(self , scheduler.lr_lambdas ) )
3
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices

_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

_SCREAMING_SNAKE_CASE = {
    """microsoft/swin-tiny-patch4-window7-224""": (
        """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}

class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ):
    __lowerCAmelCase = """swin"""

    __lowerCAmelCase = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ):
        """simple docstring"""
        super().__init__(**lowerCamelCase_ )
        UpperCamelCase = image_size
        UpperCamelCase = patch_size
        UpperCamelCase = num_channels
        UpperCamelCase = embed_dim
        UpperCamelCase = depths
        UpperCamelCase = len(lowerCamelCase_ )
        UpperCamelCase = num_heads
        UpperCamelCase = window_size
        UpperCamelCase = mlp_ratio
        UpperCamelCase = qkv_bias
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = drop_path_rate
        UpperCamelCase = hidden_act
        UpperCamelCase = use_absolute_embeddings
        UpperCamelCase = layer_norm_eps
        UpperCamelCase = initializer_range
        UpperCamelCase = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
        UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )]
        UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices(
            out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )

class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    __lowerCAmelCase = version.parse("""1.11""" )

    @property
    def lowerCamelCase_ ( self : int ):
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def lowerCamelCase_ ( self : Tuple ):
        """simple docstring"""
        return 1E-4
343
0
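One detail worth noting in the Swin context above: the channel width doubles at every stage, which is why the final hidden size is computed as embed_dim * 2 ** (len(depths) - 1). A tiny standalone check of that arithmetic (the helper name is ours):

def swin_stage_widths(embed_dim: int, num_stages: int) -> list:
    # Each stage doubles the channel dimension of the previous one.
    return [embed_dim * 2**i for i in range(num_stages)]

# For the defaults above (embed_dim=96, depths=[2, 2, 6, 2]):
assert swin_stage_widths(96, 4) == [96, 192, 384, 768]  # final width 96 * 2**3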
'''simple docstring'''

import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )

class __lowercase ( __lowerCAmelCase ):
    def __init__(self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ):
        lowerCamelCase_ : List[str] = parent
        lowerCamelCase_ : str = batch_size
        lowerCamelCase_ : Union[str, Any] = seq_length
        lowerCamelCase_ : Dict = is_training
        lowerCamelCase_ : int = use_input_mask
        lowerCamelCase_ : Any = use_token_type_ids
        lowerCamelCase_ : Any = use_labels
        lowerCamelCase_ : Dict = vocab_size
        lowerCamelCase_ : str = hidden_size
        lowerCamelCase_ : Dict = num_hidden_layers
        lowerCamelCase_ : Any = num_attention_heads
        lowerCamelCase_ : int = intermediate_size
        lowerCamelCase_ : Dict = hidden_act
        lowerCamelCase_ : Any = hidden_dropout_prob
        lowerCamelCase_ : Optional[Any] = attention_probs_dropout_prob
        lowerCamelCase_ : Dict = max_position_embeddings
        lowerCamelCase_ : List[Any] = type_vocab_size
        lowerCamelCase_ : Tuple = type_sequence_label_size
        lowerCamelCase_ : List[Any] = initializer_range
        lowerCamelCase_ : Optional[int] = num_labels
        lowerCamelCase_ : List[str] = num_choices
        lowerCamelCase_ : Optional[int] = scope

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase_ : Union[str, Any] = None
        if self.use_input_mask:
            lowerCamelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase_ : Optional[int] = None
        lowerCamelCase_ : str = None
        lowerCamelCase_ : Any = None
        if self.use_labels:
            lowerCamelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase_ : Optional[Any] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase__ (self ):
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def UpperCAmelCase__ (self , A , A , A , A , A , A ):
        lowerCamelCase_ : Optional[int] = DistilBertModel(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        lowerCamelCase_ : Any = model(lowerCamelCase_ , lowerCamelCase_ )
        lowerCamelCase_ : Tuple = model(lowerCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A ):
        lowerCamelCase_ : Union[str, Any] = DistilBertForMaskedLM(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        lowerCamelCase_ : List[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A ):
        lowerCamelCase_ : Union[str, Any] = DistilBertForQuestionAnswering(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        lowerCamelCase_ : str = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A ):
        lowerCamelCase_ : Optional[int] = self.num_labels
        lowerCamelCase_ : str = DistilBertForSequenceClassification(lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        lowerCamelCase_ : List[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A ):
        lowerCamelCase_ : Optional[int] = self.num_labels
        lowerCamelCase_ : Optional[Any] = DistilBertForTokenClassification(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        lowerCamelCase_ : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase__ (self , A , A , A , A , A , A ):
        lowerCamelCase_ : Union[str, Any] = self.num_choices
        lowerCamelCase_ : int = DistilBertForMultipleChoice(config=lowerCamelCase_ )
        model.to(lowerCamelCase_ )
        model.eval()
        lowerCamelCase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCamelCase_ : List[str] = model(
            lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : str = self.prepare_config_and_inputs()
        ((lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_)) = config_and_inputs
        lowerCamelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict

@require_torch
class __lowercase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
    lowerCamelCase : int = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    lowerCamelCase : Tuple = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase : List[Any] = True
    lowerCamelCase : Any = True
    lowerCamelCase : Optional[int] = True
    lowerCamelCase : Any = True

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : List[str] = DistilBertModelTester(self )
        lowerCamelCase_ : List[str] = ConfigTester(self , config_class=lowerCamelCase_ , dim=3_7 )

    def UpperCAmelCase__ (self ):
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*lowerCamelCase_ )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*lowerCamelCase_ )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*lowerCamelCase_ )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*lowerCamelCase_ )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*lowerCamelCase_ )

    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*lowerCamelCase_ )

    @slow
    def UpperCAmelCase__ (self ):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : Union[str, Any] = DistilBertModel.from_pretrained(lowerCamelCase_ )
            self.assertIsNotNone(lowerCamelCase_ )

    @slow
    @require_torch_gpu
    def UpperCAmelCase__ (self ):
        lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            lowerCamelCase_ : List[Any] = True
            lowerCamelCase_ : Tuple = model_class(config=lowerCamelCase_ )
            lowerCamelCase_ : Dict = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ )
            lowerCamelCase_ : int = torch.jit.trace(
                lowerCamelCase_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , '''traced_model.pt''' ) )
                lowerCamelCase_ : Dict = torch.jit.load(os.path.join(lowerCamelCase_ , '''traced_model.pt''' ) , map_location=lowerCamelCase_ )
                loaded(inputs_dict['''input_ids'''].to(lowerCamelCase_ ) , inputs_dict['''attention_mask'''].to(lowerCamelCase_ ) )

@require_torch
class __lowercase ( unittest.TestCase ):
    @slow
    def UpperCAmelCase__ (self ):
        lowerCamelCase_ : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        lowerCamelCase_ : Tuple = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
        lowerCamelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCamelCase_ : List[str] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ )[0]
        lowerCamelCase_ : Dict = torch.Size((1, 1_1, 7_6_8) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        lowerCamelCase_ : str = torch.tensor(
            [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase_ , atol=1E-4 ) )
318
import numpy as np

# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models

if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    _SCREAMING_SNAKE_CASE = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
    classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
    classifier.add(layers.Dense(units=1, activation="""sigmoid"""))

    # Compiling the CNN
    classifier.compile(
        optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)

    _SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
        """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
    )

    _SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
        """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
    )

    classifier.save("""cnn.h5""")

    # Part 3 - Making new predictions

    _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
        """dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
    )
    _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
    _SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
    _SCREAMING_SNAKE_CASE = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        _SCREAMING_SNAKE_CASE = """Normal"""
    if result[0][0] == 1:
        _SCREAMING_SNAKE_CASE = """Abnormality detected"""
343
0
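For the CNN row above, the flattened feature count follows from the usual 'valid' convolution and pooling size formula. A small sketch of that arithmetic, assuming Keras defaults of stride 1 for convolutions and stride equal to pool size for max pooling:

def conv_out(size: int, kernel: int, stride: int = 1) -> int:
    # 'valid' padding: output = floor((size - kernel) / stride) + 1
    return (size - kernel) // stride + 1

size = 64                    # input images are 64x64
size = conv_out(size, 3)     # 3x3 convolution -> 62
size = conv_out(size, 2, 2)  # 2x2 max pooling -> 31
size = conv_out(size, 3)     # 3x3 convolution -> 29
size = conv_out(size, 2, 2)  # 2x2 max pooling -> 14
assert size == 14 and size * size * 32 == 6272  # flattened feature count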
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput

class __A( __lowerCAmelCase ):
    snake_case_ = 4_2
    snake_case_ = 4_2

    def __init__( self , _snake_case , _snake_case ) -> List[Any]:
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )

    @torch.no_grad()
    def __call__( self , _snake_case = 1 , _snake_case = 50 , _snake_case = None , _snake_case = "pil" , _snake_case = True , **_snake_case , ) -> Dict:
        '''simple docstring'''
        __a = self.unet.config.sample_size
        __a = (batch_size, 3, img_size, img_size)
        __a = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        __a = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(lowerCamelCase_ )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            __a = self.scheduler.schedule[t]
            __a = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            __a , __a = self.scheduler.add_noise_to_input(lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            __a = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            __a = self.scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                __a = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                __a = self.scheduler.step_correct(
                    lowerCamelCase_ ,
                    lowerCamelCase_ ,
                    lowerCamelCase_ ,
                    lowerCamelCase_ ,
                    step_output.prev_sample ,
                    step_output['''derivative'''] ,
                )
            __a = step_output.prev_sample
        __a = (sample / 2 + 0.5).clamp(0 , 1 )
        __a = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            __a = self.numpy_to_pil(lowerCamelCase_ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=lowerCamelCase_ )
6
from __future__ import annotations

from typing import Any

class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ):
    pass

class SCREAMING_SNAKE_CASE_ :
    def __init__( self : List[Any] , lowerCamelCase_ : Any ):
        """simple docstring"""
        UpperCamelCase = data
        UpperCamelCase = None

    def __iter__( self : Optional[int] ):
        """simple docstring"""
        UpperCamelCase = self
        UpperCamelCase = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(lowerCamelCase_ )
            yield node.data
            UpperCamelCase = node.next_node

    @property
    def lowerCamelCase_ ( self : List[Any] ):
        """simple docstring"""
        try:
            list(self )
            return False
        except ContainsLoopError:
            return True

if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = Node(1)
    _SCREAMING_SNAKE_CASE = Node(2)
    _SCREAMING_SNAKE_CASE = Node(3)
    _SCREAMING_SNAKE_CASE = Node(4)
    print(root_node.has_loop)  # False
    _SCREAMING_SNAKE_CASE = root_node.next_node
    print(root_node.has_loop)  # True

    _SCREAMING_SNAKE_CASE = Node(5)
    _SCREAMING_SNAKE_CASE = Node(6)
    _SCREAMING_SNAKE_CASE = Node(5)
    _SCREAMING_SNAKE_CASE = Node(6)
    print(root_node.has_loop)  # False

    _SCREAMING_SNAKE_CASE = Node(1)
    print(root_node.has_loop)  # False
343
0
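The loop check above keeps a visited list, which costs quadratic time. Floyd's tortoise-and-hare detection is the usual constant-memory alternative; a minimal sketch (the node class and names are ours):

class _N:
    def __init__(self, data):
        self.data = data
        self.next_node = None

def has_loop(head) -> bool:
    # Advance one pointer by 1 and another by 2; they meet only inside a cycle.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False

a, b, c = _N(1), _N(2), _N(3)
a.next_node, b.next_node = b, c
assert has_loop(a) is False
c.next_node = a  # close the cycle
assert has_loop(a) is True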
'''simple docstring'''

from __future__ import annotations

lowerCAmelCase_ : Union[str, Any] = list[tuple[int, int]]

lowerCAmelCase_ : int = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowerCAmelCase_ : Union[str, Any] = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right

class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self : Union[str, Any] , __a : int , __a : int , __a : int , __a : int , __a : float , __a : Node | None , ):
        _a = pos_x
        _a = pos_y
        _a = (pos_y, pos_x)
        _a = goal_x
        _a = goal_y
        _a = g_cost
        _a = parent
        _a = self.calculate_heuristic()

    def UpperCamelCase__ ( self : Optional[Any] ):
        _a = abs(self.pos_x - self.goal_x )
        _a = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self : List[Any] , __a : Tuple ):
        return self.f_cost < other.f_cost

class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self : Any , __a : tuple[int, int] , __a : tuple[int, int] ):
        _a = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCamelCase_ )
        _a = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , lowerCamelCase_ )
        _a = [self.start]
        _a = []
        _a = False

    def UpperCamelCase__ ( self : Optional[int] ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            _a = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                _a = True
                return self.retrace_path(lowerCamelCase_ )
            self.closed_nodes.append(lowerCamelCase_ )
            _a = self.get_successors(lowerCamelCase_ )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(lowerCamelCase_ )
                else:
                    # retrieve the best current path
                    _a = self.open_nodes.pop(self.open_nodes.index(lowerCamelCase_ ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(lowerCamelCase_ )
                    else:
                        self.open_nodes.append(lowerCamelCase_ )
        if not self.reached:
            return [self.start.pos]
        return None

    def UpperCamelCase__ ( self : Any , __a : Node ):
        _a = []
        for action in delta:
            _a = parent.pos_x + action[1]
            _a = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCamelCase_ ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    lowerCamelCase_ ,
                    lowerCamelCase_ ,
                    self.target.pos_y ,
                    self.target.pos_x ,
                    parent.g_cost + 1 ,
                    lowerCamelCase_ ,
                ) )
        return successors

    def UpperCamelCase__ ( self : Optional[Any] , __a : Node | None ):
        _a = node
        _a = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            _a = current_node.parent
        path.reverse()
        return path

if __name__ == "__main__":
    lowerCAmelCase_ : Any = (0, 0)
    lowerCAmelCase_ : int = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    lowerCAmelCase_ : Any = GreedyBestFirst(init, goal)
    lowerCAmelCase_ : int = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            lowerCAmelCase_ : List[str] = 2

        for elem in grid:
            print(elem)
63
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized, parameterized_class

from . import is_sagemaker_available

if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace

@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True ,
    reason="""Skipping test because should only be run when releasing minor transformers version""" ,
)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue_model_parallelism.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """roberta-large""",
            """instance_type""": """ml.p3dn.24xlarge""",
            """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
        },
    ] )
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    def lowerCamelCase_ ( self : Tuple ):
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() ,
                encoding="""utf-8""" ,
                check=lowerCamelCase_ ,
            )
        assert hasattr(self , """env""" )

    def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ):
        """simple docstring"""
        UpperCamelCase = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        UpperCamelCase = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script ,
            source_dir=self.env.test_path ,
            role=self.env.role ,
            image_uri=self.env.image_uri ,
            base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" ,
            instance_count=lowerCamelCase_ ,
            instance_type=self.instance_type ,
            debugger_hook_config=lowerCamelCase_ ,
            hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } ,
            metric_definitions=self.env.metric_definitions ,
            distribution=lowerCamelCase_ ,
            py_version="""py36""" ,
        )

    def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ):
        """simple docstring"""
        TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(1,)] )
    def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ):
        """simple docstring"""
        UpperCamelCase = self.create_estimator(lowerCamelCase_ )
        # run training
        estimator.fit()
        # result dataframe
        UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCamelCase = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
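The greedy best-first row above orders its open nodes with the Manhattan-distance heuristic via repeated sorting. A heap makes the same "expand the smallest h first" idea explicit; a small sketch with our own names:

import heapq

def manhattan(pos, goal) -> int:
    # Same |dx| + |dy| heuristic the greedy search above uses for its f_cost.
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

goal = (6, 6)
frontier = [(manhattan(p, goal), p) for p in [(0, 1), (5, 6), (3, 3)]]
heapq.heapify(frontier)  # greedy best-first always expands the smallest h first
assert heapq.heappop(frontier)[1] == (5, 6)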
'''simple docstring'''

import argparse
from pathlib import Path

import torch

from transformers import OPTConfig, OPTModel
from transformers.utils import logging

logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

def __a(SCREAMING_SNAKE_CASE_ : List[Any] ):
    '''simple docstring'''
    _lowerCAmelCase = torch.load(UpperCamelCase_ , map_location="cpu" )
    if "model" in sd.keys():
        _lowerCAmelCase = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
    # pop unnecessary weights
    _lowerCAmelCase = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(UpperCamelCase_ )
    _lowerCAmelCase = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            _lowerCAmelCase = sd.pop(UpperCamelCase_ )
    _lowerCAmelCase = list(sd.keys() )
    for key in keys:
        if ".qkv_proj." in key:
            _lowerCAmelCase = sd[key]
            # We split QKV in separate Q,K,V
            _lowerCAmelCase = key.replace(".qkv_proj." , ".q_proj." )
            _lowerCAmelCase = key.replace(".qkv_proj." , ".k_proj." )
            _lowerCAmelCase = key.replace(".qkv_proj." , ".v_proj." )
            _lowerCAmelCase = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = torch.split(UpperCamelCase_ , depth // 3 , dim=0 )
            _lowerCAmelCase = q
            _lowerCAmelCase = k
            _lowerCAmelCase = v
            del sd[key]
    return sd

@torch.no_grad()
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str]=None ):
    '''simple docstring'''
    _lowerCAmelCase = load_checkpoint(UpperCamelCase_ )
    if config is not None:
        _lowerCAmelCase = OPTConfig.from_pretrained(UpperCamelCase_ )
    else:
        _lowerCAmelCase = OPTConfig()
    _lowerCAmelCase = OPTModel(UpperCamelCase_ ).half().eval()
    model.load_state_dict(UpperCamelCase_ )
    # Check results
    Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
    model.save_pretrained(UpperCamelCase_ )

if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    _SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
158
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_SCREAMING_SNAKE_CASE = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = ["""ConvNextFeatureExtractor"""]
    _SCREAMING_SNAKE_CASE = ["""ConvNextImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
343
0
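The OPT converter above splits a fused QKV projection into three equal slices along dim 0 after asserting the depth is divisible by 3. The same idea in a dependency-light NumPy sketch (the function name is ours):

import numpy as np

def split_qkv(fused: np.ndarray):
    # The fused projection stacks Q, K and V along dim 0, so its depth
    # must be divisible by 3, as the converter above also checks.
    depth = fused.shape[0]
    assert depth % 3 == 0
    q, k, v = np.split(fused, 3, axis=0)
    return q, k, v

q, k, v = split_qkv(np.zeros((12, 4)))
assert q.shape == k.shape == v.shape == (4, 4)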
"""simple docstring""" import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed UpperCAmelCase : Tuple = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"): from run_translation import main # noqa set_seed(42) UpperCAmelCase : Optional[Any] = "sshleifer/student_marian_en_ro_6_1" UpperCAmelCase : Union[str, Any] = "sshleifer/tiny-mbart" @require_torch class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : int=True , ): """simple docstring""" lowercase_ = self.run_trainer( eval_steps=1 , max_len=1_2 , model_name=lowerCamelCase_ , num_train_epochs=1 , distributed=lowerCamelCase_ , extra_args_str=lowerCamelCase_ , predict_with_generate=lowerCamelCase_ , do_train=lowerCamelCase_ , do_eval=lowerCamelCase_ , do_predict=lowerCamelCase_ , ) lowercase_ = TrainerState.load_from_json(os.path.join(lowerCamelCase_ , """trainer_state.json""")).log_history if not do_eval: return lowercase_ = [log for log in logs if """eval_loss""" in log.keys()] lowercase_ = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats lowercase_ = eval_metrics[-1] assert isinstance(last_step_stats["""eval_bleu"""] , lowerCamelCase_) assert not math.isnan(float(last_step_stats["""eval_loss"""])), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def _UpperCAmelCase ( self : Any): """simple docstring""" self.run_seqaseq_quick() @require_torch_multi_gpu def _UpperCAmelCase ( self : int): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase_) @require_torch_multi_gpu def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase_) @unittest.skip("""Requires an update of the env running those tests""") @require_torch_multi_gpu @require_fairscale def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase_ , extra_args_str="""--sharded_ddp simple""") @unittest.skip("""Requires an update of the env running those tests""") @require_torch_multi_gpu @require_fairscale def _UpperCAmelCase ( self : Optional[int]): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase_ , extra_args_str="""--sharded_ddp simple --fp16""") @unittest.skip("""Requires an update of the env running those tests""") @require_torch_multi_gpu @require_fairscale def _UpperCAmelCase ( self : Dict): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase_ , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=lowerCamelCase_) @unittest.skip("""Requires an update of the env running those tests""") @require_torch_multi_gpu @require_fairscale def _UpperCAmelCase ( self : int): """simple docstring""" self.run_seqaseq_quick( distributed=lowerCamelCase_ , 
extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=lowerCamelCase_) @require_apex @require_torch_gpu def _UpperCAmelCase ( self : int): """simple docstring""" self.run_seqaseq_quick(distributed=lowerCamelCase_ , extra_args_str="""--fp16 --fp16_backend=apex""") # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=lowerCamelCase_ , extra_args_str="""--fp16 --fp16_backend=apex""") @parameterized.expand(["""base""", """low""", """high""", """mixed"""]) @require_torch_multi_gpu def _UpperCAmelCase ( self : int , lowerCAmelCase_ : Dict): """simple docstring""" lowercase_ = { # test with the default log_level - should be info and thus log info once """base""": {"""extra_args_str""": """""", """n_matches""": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes """low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica """high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1}, # test with high log_level and log_level_replica - should be quiet on all processes """mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0}, } lowercase_ = experiments[experiment_id] lowercase_ = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False} lowercase_ = """Running training""" with CaptureStderr() as cl: self.run_seqaseq_quick(**lowerCamelCase_ , extra_args_str=data["""extra_args_str"""]) lowercase_ = len(re.findall(lowerCamelCase_ , cl.err)) self.assertEqual(lowerCamelCase_ , data["""n_matches"""]) @slow def _UpperCAmelCase ( self : List[str]): """simple docstring""" lowercase_ = self.run_trainer( eval_steps=2 , max_len=1_2_8 , model_name=lowerCamelCase_ , learning_rate=3E-4 , num_train_epochs=1_0 , distributed=lowerCamelCase_ , ) # Check metrics lowercase_ = TrainerState.load_from_json(os.path.join(lowerCamelCase_ , """trainer_state.json""")).log_history lowercase_ = [log for log in logs if """eval_loss""" in log.keys()] lowercase_ = eval_metrics[0] lowercase_ = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["""eval_bleu"""] , lowerCamelCase_) # test if do_predict saves generations and metrics lowercase_ = os.listdir(lowerCamelCase_) lowercase_ = {os.path.basename(lowerCamelCase_) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def _UpperCAmelCase ( self : str): """simple docstring""" from transformers.training_args import OptimizerNames def train_and_return_metrics(lowerCAmelCase_ : str) -> Tuple[int, float]: lowercase_ = """--skip_memory_metrics 0""" lowercase_ = self.run_trainer( max_len=1_2_8 , model_name=lowerCamelCase_ , learning_rate=3E-4 , num_train_epochs=1 , optim=lowerCamelCase_ , distributed=lowerCamelCase_ , extra_args_str=lowerCamelCase_ , do_eval=lowerCamelCase_ , do_predict=lowerCamelCase_ , n_gpus_to_use=1 , ) # Check metrics lowercase_ = TrainerState.load_from_json(Path(lowerCamelCase_ , """trainer_state.json""")).log_history lowercase_ = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**2_0) lowercase_ = 
int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**2_0) lowercase_ = logs[0]["""train_loss"""] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss lowercase_ , lowercase_ , lowercase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value) lowercase_ , lowercase_ , lowercase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value) lowercase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb lowercase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig lowercase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb lowercase_ = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings lowercase_ = 1_2_0 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( lowerCamelCase_ , lowerCamelCase_ , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got""" F''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and''' F''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , ) self.assertGreater( lowerCamelCase_ , lowerCamelCase_ , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got""" F''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and''' F''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , ) self.assertEqual( lowerCamelCase_ , lowerCamelCase_ , F'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''') def _UpperCAmelCase ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : float = 3E-3 , lowerCAmelCase_ : str = "adafactor" , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : str = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = None , ): """simple docstring""" lowercase_ = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro""" lowercase_ = self.get_auto_remove_tmp_dir() lowercase_ = F''' --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --test_file {data_dir}/test.json --output_dir {output_dir} --overwrite_output_dir --max_train_samples 8 --max_source_length {max_len} --max_target_length {max_len} --do_train --num_train_epochs {str(lowerCamelCase_)} --per_device_train_batch_size 4 --learning_rate {learning_rate} --warmup_steps 8 --logging_steps 0 --logging_strategy no --save_steps {str(lowerCamelCase_)} 
--group_by_length --label_smoothing_factor 0.1 --target_lang ro_RO --source_lang en_XX '''.split() lowercase_ = F''' --do_eval --per_device_eval_batch_size 4 --max_eval_samples 8 --val_max_target_length {max_len} --evaluation_strategy steps --eval_steps {str(lowerCamelCase_)} '''.split() lowercase_ = """ --do_predict """.split() lowercase_ = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F'''--optim {optim}'''.split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: lowercase_ = get_gpu_count() lowercase_ = get_torch_dist_unique_port() lowercase_ = F''' -m torch.distributed.run --nproc_per_node={n_gpus_to_use} --master_port={master_port} {self.examples_dir_str}/pytorch/translation/run_translation.py '''.split() lowercase_ = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowerCamelCase_ , env=self.get_env()) else: lowercase_ = ["""run_translation.py"""] + args with patch.object(lowerCamelCase_ , """argv""" , lowerCamelCase_): main() return output_dir
136
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ShapEPipeline __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCAmelCase = False @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return 8 @property def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase = PriorTransformer(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**lowerCamelCase_ ) return model def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , ) UpperCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self : int , 
lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ): """simple docstring""" if str(lowerCamelCase_ ).startswith("""mps""" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch_device == """cpu""" UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCamelCase = pipe( """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
343
0
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) def _snake_case ( _snake_case : Any , _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = RobertaPreLayerNormConfig.from_pretrained( UpperCamelCase_ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict _A = torch.load(hf_hub_download(repo_id=UpperCamelCase_ , filename='pytorch_model.bin' ) ) _A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): _A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue _A = tensor_value _A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=UpperCamelCase_ , config=UpperCamelCase_ , state_dict=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) # convert tokenizer _A = AutoTokenizer.from_pretrained(UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) a = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
315
from __future__ import annotations def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> list: '''simple docstring''' UpperCamelCase = [] UpperCamelCase , UpperCamelCase = input_list[low:mid], input_list[mid : high + 1] while left and right: result.append((left if left[0] <= right[0] else right).pop(0 ) ) UpperCamelCase = result + left + right return input_list def lowercase( UpperCamelCase_ ) -> list: '''simple docstring''' if len(UpperCamelCase_ ) <= 1: return input_list UpperCamelCase = list(UpperCamelCase_ ) # iteration for two-way merging UpperCamelCase = 2 while p <= len(UpperCamelCase_ ): # getting low, high and middle value for merge-sort of single list for i in range(0 , len(UpperCamelCase_ ) , UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = i + p - 1 UpperCamelCase = (low + high + 1) // 2 UpperCamelCase = merge(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # final merge of last two parts if p * 2 >= len(UpperCamelCase_ ): UpperCamelCase = i UpperCamelCase = merge(UpperCamelCase_ , 0 , UpperCamelCase_ , len(UpperCamelCase_ ) - 1 ) break p *= 2 return input_list if __name__ == "__main__": _SCREAMING_SNAKE_CASE = input("""Enter numbers separated by a comma:\n""").strip() if user_input == "": _SCREAMING_SNAKE_CASE = [] else: _SCREAMING_SNAKE_CASE = [int(item.strip()) for item in user_input.split(""",""")] print(iter_merge_sort(unsorted))
343
0
import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor lowerCamelCase = logging.get_logger(__name__) class A ( __lowerCAmelCase ): def __init__( self : Optional[Any] , *lowercase_ : Tuple , **lowercase_ : Dict ) -> int: """simple docstring""" warnings.warn( 'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use DPTImageProcessor instead.' , lowerCamelCase_ , ) super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = num_groups def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = BitModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = BitForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ): 
"""simple docstring""" UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCAmelCase = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(config=lowerCamelCase_ ) for name, module in model.named_modules(): if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def lowerCamelCase_ ( self : int ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): UpperCamelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = 
prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitBackbone,) if is_torch_available() else () __lowerCAmelCase = BitConfig __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self )
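# Running the suite above (illustrative): in a transformers checkout it would
# live at tests/models/bit/test_modeling_bit.py (the path is an assumption based
# on the imports). The checkpoint-downloading tests are marked @slow and stay
# skipped unless slow tests are switched on:
#
#   RUN_SLOW=1 python -m pytest tests/models/bit/test_modeling_bit.py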
"""simple docstring""" SCREAMING_SNAKE_CASE : Tuple = 8.314_462 # Unit - J mol-1 K-1 def lowercase ( _snake_case : Union[str, Any] , _snake_case : Optional[int] , _snake_case : List[str] ) ->float: """simple docstring""" if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('''Invalid inputs. Enter positive value.''' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def lowercase ( _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Optional[int] ) ->float: """simple docstring""" if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('''Invalid inputs. Enter positive value.''' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = TFResNetModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFResNetModel, 
TFResNetForImageClassification) if is_tf_available() else () __lowerCAmelCase = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = TFResNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ): UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Any ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
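# Note (illustrative): the integration test above compares logits against the
# first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST (microsoft/resnet-50 at
# the time of writing, an assumption here); like the BiT suite it is gated
# behind the slow-test switch:
#
#   RUN_SLOW=1 python -m pytest tests/models/resnet/test_modeling_tf_resnet.py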
import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class _lowerCamelCase: def __init__( self, lowerCamelCase = "cpu", lowerCamelCase = "openai/clip-vit-large-patch14") -> Any: """simple docstring""" _lowercase : Dict = device _lowercase : Union[str, Any] = CLIPTokenizerFast.from_pretrained(lowerCamelCase_) _lowercase : int = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] _lowercase : Dict = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] _lowercase : Tuple = torchvision.transforms.Normalize(self.image_mean, self.image_std) _lowercase : Dict = torchvision.transforms.Resize(2_24) _lowercase : Optional[int] = torchvision.transforms.CenterCrop(2_24) def UpperCamelCase ( self, lowerCamelCase) -> List[str]: """simple docstring""" _lowercase : Optional[int] = self.resize(lowerCamelCase_) _lowercase : str = self.center_crop(lowerCamelCase_) _lowercase : Tuple = self.normalize(lowerCamelCase_) return images def __call__( self, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase) -> Dict: """simple docstring""" _lowercase : List[Any] = self.tokenizer(text=lowerCamelCase_, **lowerCamelCase_) _lowercase : Optional[Any] = self.preprocess_img(lowerCamelCase_) _lowercase : Optional[Any] = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class _lowerCamelCase( nn.Module ): def __init__( self, lowerCamelCase=10, lowerCamelCase=0.0_1, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase="image", lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, ) -> Any: """simple docstring""" super().__init__() _lowercase : Dict = None _lowercase : int = device if device else get_device() if vqgan: _lowercase : int = vqgan else: _lowercase : Optional[Any] = load_vqgan(self.device, conf_path=lowerCamelCase_, ckpt_path=lowerCamelCase_) self.vqgan.eval() if clip: _lowercase : Tuple = clip else: _lowercase : List[str] = CLIPModel.from_pretrained('openai/clip-vit-base-patch32') self.clip.to(self.device) _lowercase : str = ProcessorGradientFlow(device=self.device) _lowercase : List[Any] = iterations _lowercase : int = lr _lowercase : Optional[Any] = log _lowercase : Dict = make_grid _lowercase : Optional[int] = return_val _lowercase : List[Any] = quantize _lowercase : Dict = self.vqgan.decoder.z_shape def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=5, lowerCamelCase=True) -> Tuple: """simple docstring""" _lowercase : str = [] if output_path is None: _lowercase : Any = './animation.gif' if input_path is None: _lowercase : Dict = self.save_path _lowercase : Tuple = sorted(glob(input_path + '/*')) if not len(lowerCamelCase_): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)') if len(lowerCamelCase_) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)') _lowercase : List[str] = total_duration / len(lowerCamelCase_) _lowercase : str = [frame_duration] * len(lowerCamelCase_) if extend_frames: _lowercase : Dict = 1.5 _lowercase : 
List[str] = 3 for file_name in paths: if file_name.endswith('.png'): images.append(imageio.imread(lowerCamelCase_)) imageio.mimsave(lowerCamelCase_, lowerCamelCase_, duration=lowerCamelCase_) print(F'''gif saved to {output_path}''') def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None) -> Any: """simple docstring""" if not (path or img): raise ValueError('Input either path or tensor') if img is not None: raise NotImplementedError _lowercase : Any = preprocess(Image.open(lowerCamelCase_), target_image_size=2_56).to(self.device) _lowercase : List[Any] = preprocess_vqgan(lowerCamelCase_) _lowercase , *_lowercase : List[Any] = self.vqgan.encode(lowerCamelCase_) return z def UpperCamelCase ( self, lowerCamelCase) -> Union[str, Any]: """simple docstring""" _lowercase : Any = self.latent.detach().requires_grad_() _lowercase : Optional[Any] = base_latent + transform_vector if self.quantize: _lowercase , *_lowercase : int = self.vqgan.quantize(lowerCamelCase_) else: _lowercase : Dict = trans_latent return self.vqgan.decode(lowerCamelCase_) def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=None) -> Optional[Any]: """simple docstring""" _lowercase : Optional[Any] = self.clip_preprocessor(text=lowerCamelCase_, images=lowerCamelCase_, return_tensors='pt', padding=lowerCamelCase_) _lowercase : str = self.clip(**lowerCamelCase_) _lowercase : List[Any] = clip_outputs.logits_per_image if weights is not None: _lowercase : Optional[int] = similarity_logits * weights return similarity_logits.sum() def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Dict: """simple docstring""" _lowercase : str = self._get_clip_similarity(pos_prompts['prompts'], lowerCamelCase_, weights=(1 / pos_prompts['weights'])) if neg_prompts: _lowercase : int = self._get_clip_similarity(neg_prompts['prompts'], lowerCamelCase_, weights=neg_prompts['weights']) else: _lowercase : Optional[Any] = torch.tensor([1], device=self.device) _lowercase : List[str] = -torch.log(lowerCamelCase_) + torch.log(lowerCamelCase_) return loss def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Union[str, Any]: """simple docstring""" _lowercase : Optional[int] = torch.randn_like(self.latent, requires_grad=lowerCamelCase_, device=self.device) _lowercase : Union[str, Any] = torch.optim.Adam([vector], lr=self.lr) for i in range(self.iterations): optim.zero_grad() _lowercase : Optional[int] = self._add_vector(lowerCamelCase_) _lowercase : Optional[int] = loop_post_process(lowerCamelCase_) _lowercase : str = self._get_CLIP_loss(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_) print('CLIP loss', lowerCamelCase_) if self.log: wandb.log({'CLIP Loss': clip_loss}) clip_loss.backward(retain_graph=lowerCamelCase_) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> str: """simple docstring""" wandb.init(reinit=lowerCamelCase_, project='face-editor') wandb.config.update({'Positive Prompts': positive_prompts}) wandb.config.update({'Negative Prompts': negative_prompts}) wandb.config.update({'lr': self.lr, 'iterations': self.iterations}) if image_path: _lowercase : Optional[int] = Image.open(lowerCamelCase_) _lowercase : int = image.resize((2_56, 2_56)) wandb.log('Original Image', wandb.Image(lowerCamelCase_)) def UpperCamelCase ( self, lowerCamelCase) -> Dict: """simple docstring""" if not prompts: return [] _lowercase : Tuple = [] _lowercase : Tuple = [] 
if isinstance(lowerCamelCase_, lowerCamelCase_): _lowercase : Optional[Any] = [prompt.strip() for prompt in prompts.split('|')] for prompt in prompts: if isinstance(lowerCamelCase_, (tuple, list)): _lowercase : Optional[int] = prompt[0] _lowercase : Optional[Any] = float(prompt[1]) elif ":" in prompt: _lowercase , _lowercase : str = prompt.split(':') _lowercase : Union[str, Any] = float(lowerCamelCase_) else: _lowercase : Any = prompt _lowercase : List[Any] = 1.0 processed_prompts.append(lowerCamelCase_) weights.append(lowerCamelCase_) return { "prompts": processed_prompts, "weights": torch.tensor(lowerCamelCase_, device=self.device), } def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=None, ) -> List[Any]: """simple docstring""" if image_path: _lowercase : List[str] = self._get_latent(lowerCamelCase_) else: _lowercase : Union[str, Any] = torch.randn(self.latent_dim, device=self.device) if self.log: self._init_logging(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_) assert pos_prompts, "You must provide at least one positive prompt." _lowercase : Optional[int] = self.process_prompts(lowerCamelCase_) _lowercase : Optional[int] = self.process_prompts(lowerCamelCase_) if save_final and save_path is None: _lowercase : int = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'])) if not os.path.exists(lowerCamelCase_): os.makedirs(lowerCamelCase_) else: _lowercase : Optional[int] = save_path + '_' + get_timestamp() os.makedirs(lowerCamelCase_) _lowercase : Union[str, Any] = save_path _lowercase : Tuple = self.vqgan.decode(self.latent)[0] if show_intermediate: print('Original Image') show_pil(custom_to_pil(lowerCamelCase_)) _lowercase : str = loop_post_process(lowerCamelCase_) for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_)): if show_intermediate: show_pil(lowerCamelCase_) if save_intermediate: transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}.png''')) if self.log: wandb.log({'Image': wandb.Image(lowerCamelCase_)}) if show_final: show_pil(lowerCamelCase_) if save_final: transformed_img.save(os.path.join(self.save_path, F'''iter_{iter:03d}_final.png'''))
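# Usage sketch (illustrative): every identifier in this file was mangled, so the
# names below are assumptions -- read the nn.Module subclass above as `VQGANCLIP`
# with the final method as its `generate` entry point. Prompts accept the
# "text:weight" syntax with "|" separators parsed by the prompt-processing
# method; the image path is a hypothetical placeholder.
#
#   editor = VQGANCLIP(iterations=20, lr=0.01, log=False)
#   editor.generate(
#       pos_prompts="a smiling face:1.0|freckles:0.5",
#       image_path="./inputs/face.png",
#       show_intermediate=False,
#       save_final=True,
#   )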
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val def lowercase( UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase = """""" if is_panoptic: UpperCamelCase = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase = """resnet101""" if 
"dc5" in model_name: UpperCamelCase = True UpperCamelCase = """panoptic""" in model_name if is_panoptic: UpperCamelCase = 250 else: UpperCamelCase = 91 UpperCamelCase = """huggingface/label-files""" UpperCamelCase = """coco-detection-id2label.json""" UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase = encoding["""pixel_values"""] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() UpperCamelCase = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val # finally, create HuggingFace model and load state dict UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCamelCase = conditional_detr(UpperCamelCase_ ) UpperCamelCase = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() 
parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
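# Usage sketch (illustrative): converting the default checkpoint, matching the
# argparse defaults above; the script filename and the output folder are
# assumptions. Note the script also calls push_to_hub on the converted model,
# so a logged-in Hub account is needed for that step.
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional-detr-resnet-50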
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCamelCase : UpperCAmelCase : Optional[Any] = LEDConfig UpperCAmelCase : int = {} UpperCAmelCase : int = """gelu""" def __init__(self : Tuple , _A : Optional[int] , _A : List[str]=13 , _A : List[Any]=7 , _A : Any=True , _A : Union[str, Any]=False , _A : List[Any]=99 , _A : Optional[int]=32 , _A : Optional[int]=2 , _A : int=4 , _A : List[Any]=37 , _A : List[Any]=0.1 , _A : int=0.1 , _A : int=20 , _A : Tuple=2 , _A : Union[str, Any]=1 , _A : str=0 , _A : str=4 , ) -> List[Any]: __snake_case : Dict = parent __snake_case : Any = batch_size __snake_case : List[Any] = seq_length __snake_case : Optional[int] = is_training __snake_case : Optional[Any] = use_labels __snake_case : List[Any] = vocab_size __snake_case : Optional[int] = hidden_size __snake_case : Optional[int] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : List[str] = intermediate_size __snake_case : Any = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : Dict = max_position_embeddings __snake_case : Any = eos_token_id __snake_case : str = pad_token_id __snake_case : int = bos_token_id __snake_case : List[Any] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after __snake_case : Optional[int] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests __snake_case : Optional[Any] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _lowercase (self : int) -> Dict: __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) __snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) __snake_case : int = tf.concat([input_ids, eos_tensor] , axis=1) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) __snake_case : Optional[Any] = prepare_led_inputs_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) __snake_case : int = tf.concat( [tf.zeros_like(lowerCamelCase_)[:, :-1], tf.ones_like(lowerCamelCase_)[:, -1:]] , axis=-1 , ) __snake_case : Optional[int] = global_attention_mask return config, inputs_dict def _lowercase (self : Any , _A : List[str] , _A : Any) -> Optional[Any]: __snake_case : Dict = TFLEDModel(config=lowerCamelCase_).get_decoder() __snake_case : int = inputs_dict['input_ids'] __snake_case : Any = input_ids[:1, :] __snake_case : Tuple = inputs_dict['attention_mask'][:1, :] __snake_case : str = 1 # first forward pass __snake_case : Optional[Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , use_cache=lowerCamelCase_) __snake_case , __snake_case : Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __snake_case : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size) __snake_case : Tuple = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and __snake_case : Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1) __snake_case : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1) __snake_case : Union[str, Any] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_)[0] __snake_case : List[str] = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , past_key_values=lowerCamelCase_)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice __snake_case : str = int(ids_tensor((1,) , output_from_past.shape[-1])) __snake_case : List[Any] = output_from_no_past[:, -3:, random_slice_idx] __snake_case : List[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowerCamelCase_ , lowerCamelCase_ , rtol=1E-3) def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[Any]=None , ) -> int: '''simple docstring''' if attention_mask is None: __snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(UpperCamelCase_ , 
config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __snake_case : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __snake_case : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCamelCase ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): UpperCAmelCase : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () UpperCAmelCase : Any = (TFLEDForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase : Tuple = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase : str = True UpperCAmelCase : List[Any] = False UpperCAmelCase : Optional[Any] = False UpperCAmelCase : Union[str, Any] = False def _lowercase (self : Tuple) -> Any: __snake_case : List[str] = TFLEDModelTester(self) __snake_case : Any = ConfigTester(self , config_class=lowerCamelCase_) def _lowercase (self : str) -> Optional[int]: self.config_tester.run_common_tests() def _lowercase (self : int) -> List[str]: __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase_) def _lowercase (self : List[Any]) -> int: __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Any = tf.zeros_like(inputs_dict['attention_mask']) __snake_case : Optional[int] = 2 __snake_case : Optional[Any] = tf.where( tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , ) __snake_case : Union[str, Any] = True __snake_case : Any = self.model_tester.seq_length __snake_case : Optional[int] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(_A : str): __snake_case : Optional[Any] = outputs.decoder_attentions self.assertEqual(len(lowerCamelCase_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(_A : Any): __snake_case : List[Any] = [t.numpy() for t in outputs.encoder_attentions] __snake_case : Union[str, Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowerCamelCase_) , self.model_tester.num_hidden_layers) self.assertEqual(len(lowerCamelCase_) , self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: __snake_case : Any = True __snake_case : str = False __snake_case : int = False __snake_case : str = 
model_class(lowerCamelCase_) __snake_case : Optional[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_)) __snake_case : Any = len(lowerCamelCase_) self.assertEqual(config.output_hidden_states , lowerCamelCase_) check_encoder_attentions_output(lowerCamelCase_) if self.is_encoder_decoder: __snake_case : Dict = model_class(lowerCamelCase_) __snake_case : Optional[int] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_)) self.assertEqual(config.output_hidden_states , lowerCamelCase_) check_decoder_attentions_output(lowerCamelCase_) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __snake_case : Any = True __snake_case : Dict = model_class(lowerCamelCase_) __snake_case : List[Any] = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_)) self.assertEqual(config.output_hidden_states , lowerCamelCase_) check_encoder_attentions_output(lowerCamelCase_) # Check attention is always last and order is fine __snake_case : Optional[Any] = True __snake_case : List[str] = True __snake_case : str = model_class(lowerCamelCase_) __snake_case : Dict = model(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_)) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase_)) self.assertEqual(model.config.output_hidden_states , lowerCamelCase_) check_encoder_attentions_output(lowerCamelCase_) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.') def _lowercase (self : Any) -> Tuple: pass def _lowercase (self : Optional[int]) -> List[str]: pass def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> Tuple: '''simple docstring''' return tf.constant(UpperCamelCase_ , dtype=tf.intaa ) _a : List[str]= 1e-4 @slow @require_tf class UpperCamelCase ( unittest.TestCase ): def _lowercase (self : Optional[Any]) -> Any: __snake_case : Optional[int] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384').led # change to intended input here __snake_case : Union[str, Any] = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]]) __snake_case : List[Any] = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]]) __snake_case : Any = prepare_led_inputs_dict(model.config , lowerCamelCase_ , lowerCamelCase_) __snake_case : Tuple = model(**lowerCamelCase_)[0] __snake_case : int = (1, 10_24, 7_68) self.assertEqual(output.shape , lowerCamelCase_) # change to expected output here __snake_case : Optional[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-3) def _lowercase (self : Any) -> Dict: __snake_case : Dict = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384') # change to intended input here __snake_case : Union[str, Any] = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]]) __snake_case : List[str] = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]]) __snake_case : Any = prepare_led_inputs_dict(model.config , lowerCamelCase_ , lowerCamelCase_) __snake_case : int = model(**lowerCamelCase_)[0] __snake_case : Union[str, Any] = (1, 10_24, model.config.vocab_size) self.assertEqual(output.shape , lowerCamelCase_) # change to expected output here __snake_case : List[Any] = tf.convert_to_tensor( [[33.65_07, 6.4_572, 16.80_89], [5.8_739, -2.4_238, 11.29_02], [-3.2_139, -4.3_149, 4.2_783]] , ) 
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1E-3 , rtol=1E-3)
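# A minimal sketch (plain NumPy, not part of the test file above) of how
# prepare_led_inputs_dict derives its default masks: the attention mask is 1
# wherever input_ids differs from the pad token, and the decoder mask always
# keeps its first position. The token and pad ids below are made up.
import numpy as np

def make_default_masks(input_ids: np.ndarray, decoder_input_ids: np.ndarray, pad_token_id: int):
    attention_mask = (input_ids != pad_token_id).astype(np.int8)
    decoder_attention_mask = np.concatenate(
        [
            np.ones_like(decoder_input_ids[:, :1], dtype=np.int8),  # always attend to the first token
            (decoder_input_ids[:, 1:] != pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask

ids = np.array([[0, 5, 6, 1, 1]])  # 1 is the assumed pad id
dec = np.array([[1, 7, 8, 1, 1]])
print(make_default_masks(ids, dec, pad_token_id=1))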
172
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = mask_ratio UpperCamelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , 
lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches UpperCamelCase = (self.image_size // self.patch_size) ** 2 UpperCamelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) UpperCamelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self : str ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = outputs_dict[0].numpy() UpperCamelCase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ): UpperCamelCase = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): UpperCamelCase = v.numpy() else: UpperCamelCase = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ ) } UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCamelCase = main_layer_class(lowerCamelCase_ ) UpperCamelCase = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) UpperCamelCase = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" ) model.save(lowerCamelCase_ ) UpperCamelCase = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) UpperCamelCase = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = outputs.last_hidden_state.numpy() UpperCamelCase = 0 else: UpperCamelCase = outputs.logits.numpy() UpperCamelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = after_outputs["""last_hidden_state"""].numpy() UpperCamelCase = 0 else: UpperCamelCase = after_outputs["""logits"""].numpy() UpperCamelCase = 0 UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) UpperCamelCase = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCamelCase = model_class.from_config(model.config ) UpperCamelCase = 
new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> int: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase = ViTMAEConfig() UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
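# A small worked example of the ViTMAE sequence-length arithmetic the tester
# above relies on: with mask_ratio of the (patches + CLS) tokens dropped, the
# encoder sees ceil((1 - mask_ratio) * (num_patches + 1)) tokens. The values
# below are the tester defaults (image_size=30, patch_size=2, mask_ratio=0.6).
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                       # 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))   # 91
print(num_patches, seq_length)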
343
0
"""Build and measure a GHZ (fully entangled) state with Qiskit."""
import qiskit


def quantum_entanglement(qubits: int = 2):
    """Entangle `qubits` qubits into a GHZ state and return measurement counts."""
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Measuring any one qubit now collapses the superposition of the others,
    # leaving them in the same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {quantum_entanglement(3)}")
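# A hedged sanity check, independent of qiskit: an ideal GHZ run should only
# ever measure the all-zeros or all-ones bitstring. The counts dicts below are
# illustrative, not real simulator output.
def is_ghz_like(counts: dict) -> bool:
    n = len(next(iter(counts)))
    return set(counts) <= {"0" * n, "1" * n}

print(is_ghz_like({"000": 498, "111": 502}))  # True
print(is_ghz_like({"000": 490, "101": 510}))  # False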
3
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour uses `color`."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` onward, backtracking on failure."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring using at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
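# Example usage of the backtracking colorer above on a small adjacency-matrix
# graph (a triangle plus one pendant vertex); 3 colors suffice, 2 do not.
triangle_plus_tail = [
    [0, 1, 1, 0],
    [1, 0, 1, 0],
    [1, 1, 0, 1],
    [0, 0, 1, 0],
]
print(color(triangle_plus_tail, 3))  # [0, 1, 2, 0]
print(color(triangle_plus_tail, 2))  # [] -- a triangle needs 3 colors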
343
0
'''simple docstring''' import os import warnings from typing import List, Optional from ...tokenization_utils_base import BatchEncoding from ...utils import logging from .configuration_rag import RagConfig __lowercase : Union[str, Any] = logging.get_logger(__name__) class __lowercase : def __init__(self , A , A ): lowerCamelCase_ : Any = question_encoder lowerCamelCase_ : str = generator lowerCamelCase_ : List[str] = self.question_encoder def UpperCAmelCase__ (self , A ): if os.path.isfile(lowerCamelCase_ ): raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) lowerCamelCase_ : Optional[Any] = os.path.join(lowerCamelCase_ , '''question_encoder_tokenizer''' ) lowerCamelCase_ : Any = os.path.join(lowerCamelCase_ , '''generator_tokenizer''' ) self.question_encoder.save_pretrained(lowerCamelCase_ ) self.generator.save_pretrained(lowerCamelCase_ ) @classmethod def UpperCAmelCase__ (cls , A , **A ): from ..auto.tokenization_auto import AutoTokenizer lowerCamelCase_ : int = kwargs.pop('''config''' , lowerCamelCase_ ) if config is None: lowerCamelCase_ : Union[str, Any] = RagConfig.from_pretrained(lowerCamelCase_ ) lowerCamelCase_ : Any = AutoTokenizer.from_pretrained( lowerCamelCase_ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' ) lowerCamelCase_ : Optional[int] = AutoTokenizer.from_pretrained( lowerCamelCase_ , config=config.generator , subfolder='''generator_tokenizer''' ) return cls(question_encoder=lowerCamelCase_ , generator=lowerCamelCase_ ) def __call__(self , *A , **A ): return self.current_tokenizer(*lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase__ (self , *A , **A ): return self.generator.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase__ (self , *A , **A ): return self.generator.decode(*lowerCamelCase_ , **lowerCamelCase_ ) def UpperCAmelCase__ (self ): lowerCamelCase_ : List[str] = self.question_encoder def UpperCAmelCase__ (self ): lowerCamelCase_ : Optional[int] = self.generator def UpperCAmelCase__ (self , A , A = None , A = None , A = None , A = "longest" , A = None , A = True , **A , ): warnings.warn( '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the ''' '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` ''' '''context manager to prepare your targets. See the documentation of your specific tokenizer for more ''' '''details''' , lowerCamelCase_ , ) if max_length is None: lowerCamelCase_ : str = self.current_tokenizer.model_max_length lowerCamelCase_ : Tuple = self( lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors=lowerCamelCase_ , max_length=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , **lowerCamelCase_ , ) if tgt_texts is None: return model_inputs # Process tgt_texts if max_target_length is None: lowerCamelCase_ : Optional[int] = self.current_tokenizer.model_max_length lowerCamelCase_ : Union[str, Any] = self( text_target=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_tensors=lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , **lowerCamelCase_ , ) lowerCamelCase_ : Optional[Any] = labels['''input_ids'''] return model_inputs
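# A stripped-down sketch (invented names, not the class above) of the routing
# pattern RagTokenizer uses: one wrapper holds two tokenizers and sends
# __call__ through whichever is "current", flipping between the question
# encoder and the generator.
class TwoHeadedTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current = question_encoder  # default to the question encoder

    def __call__(self, text):
        return self.current(text)

    def use_generator(self):
        self.current = self.generator

    def use_question_encoder(self):
        self.current = self.question_encoder

tok = TwoHeadedTokenizer(str.upper, str.lower)  # toy "tokenizers"
print(tok("Hello"))   # HELLO (question-encoder path)
tok.use_generator()
print(tok("Hello"))   # hello (generator path)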
318
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED _SCREAMING_SNAKE_CASE = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } _SCREAMING_SNAKE_CASE = { """allenai/led-base-16384""": 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowercase( ) -> List[str]: '''simple docstring''' UpperCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) UpperCamelCase = bs[:] UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(UpperCamelCase_ ) cs.append(2**8 + n ) n += 1 UpperCamelCase = [chr(UpperCamelCase_ ) for n in cs] return dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) def lowercase( UpperCamelCase_ ) -> List[str]: '''simple docstring''' UpperCamelCase = set() UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = VOCAB_FILES_NAMES __lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCAmelCase = ["""input_ids""", """attention_mask"""] def __init__( self : str , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str="replace" , lowerCamelCase_ : Any="<s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : List[Any]="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : str="<unk>" , lowerCamelCase_ : int="<pad>" , lowerCamelCase_ : List[str]="<mask>" , lowerCamelCase_ : str=False , **lowerCamelCase_ : str , ): """simple docstring""" UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: UpperCamelCase = json.load(lowerCamelCase_ ) UpperCamelCase = {v: k for k, v in self.encoder.items()} UpperCamelCase = errors # how to handle errors in decoding UpperCamelCase = bytes_to_unicode() UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(lowerCamelCase_ , encoding="""utf-8""" ) as merges_handle: UpperCamelCase = merges_handle.read().split("""\n""" )[1:-1] UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) UpperCamelCase = {} UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCamelCase = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase_ ( self : str ): """simple docstring""" return len(self.encoder ) def lowerCamelCase_ ( self : str ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Dict ): """simple docstring""" if token in self.cache: return self.cache[token] UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = get_pairs(lowerCamelCase_ ) if not pairs: return token while True: UpperCamelCase = min(lowerCamelCase_ , key=lambda lowerCamelCase_ : self.bpe_ranks.get(lowerCamelCase_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break UpperCamelCase , UpperCamelCase = bigram UpperCamelCase = [] UpperCamelCase = 0 while i < len(lowerCamelCase_ ): try: UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCamelCase = j if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCamelCase = tuple(lowerCamelCase_ ) UpperCamelCase = new_word if len(lowerCamelCase_ ) == 1: break else: UpperCamelCase = get_pairs(lowerCamelCase_ ) UpperCamelCase = """ """.join(lowerCamelCase_ ) UpperCamelCase = word return word def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = [] for token in re.findall(self.pat , lowerCamelCase_ ): UpperCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str ): """simple docstring""" return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Any ): """simple docstring""" return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : 
Union[str, Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = """""".join(lowerCamelCase_ ) UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): """simple docstring""" if not os.path.isdir(lowerCamelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCamelCase = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) UpperCamelCase = 0 with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) UpperCamelCase = token_index writer.write(""" """.join(lowerCamelCase_ ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCamelCase = [self.cls_token_id] UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()): UpperCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , ): """simple docstring""" UpperCamelCase = super()._pad( encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ 
, pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , ) # Load from model defaults if return_attention_mask is None: UpperCamelCase = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCamelCase = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCamelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(lowerCamelCase_ ) if needs_to_be_padded: UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCamelCase = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": UpperCamelCase = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
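# A pure-Python sketch of the one LED-specific step in _pad above: the
# global_attention_mask is padded with -1 (not 0, which would mean "local
# attention") until it matches the padded input length.
def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    difference = target_len - len(mask)
    if difference <= 0:
        return mask
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

print(pad_global_attention_mask([1, 0, 0], 5))           # [1, 0, 0, -1, -1]
print(pad_global_attention_mask([1, 0, 0], 5, "left"))   # [-1, -1, 1, 0, 0]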
343
0
import requests

APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "weather", params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "forecast", params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    return requests.get(URL_BASE + "onecall", params=locals()).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        location = input("Enter a location:").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
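# The params=locals() trick above works because, at the top of a function
# body, locals() is exactly the dict of named arguments -- so the parameter
# names double as the API's query-string keys. A dependency-free sketch:
def build_query(q: str = "Chicago", appid: str = "demo-key") -> dict:
    return dict(locals())  # {'q': 'Chicago', 'appid': 'demo-key'}

print(build_query())
print(build_query(q="Kolkata, India"))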
6
import os from itertools import chain from random import randrange, shuffle import pytest from .sola import PokerHand _SCREAMING_SNAKE_CASE = ( """4S 3H 2C 7S 5H""", """9D 8H 2C 6S 7H""", """2D 6D 9D TH 7D""", """TC 8C 2S JH 6C""", """JH 8S TH AH QH""", """TS KS 5S 9S AC""", """KD 6S 9D TH AD""", """KS 8D 4D 9S 4S""", # pair """8C 4S KH JS 4D""", # pair """QH 8H KD JH 8S""", # pair """KC 4H KS 2H 8D""", # pair """KD 4S KC 3H 8S""", # pair """AH 8S AS KC JH""", # pair """3H 4C 4H 3S 2H""", # 2 pairs """5S 5D 2C KH KH""", # 2 pairs """3C KH 5D 5S KH""", # 2 pairs """AS 3C KH AD KH""", # 2 pairs """7C 7S 3S 7H 5S""", # 3 of a kind """7C 7S KH 2H 7H""", # 3 of a kind """AC KH QH AH AS""", # 3 of a kind """2H 4D 3C AS 5S""", # straight (low ace) """3C 5C 4C 2C 6H""", # straight """6S 8S 7S 5H 9H""", # straight """JS QS 9H TS KH""", # straight """QC KH TS JS AH""", # straight (high ace) """8C 9C 5C 3C TC""", # flush """3S 8S 9S 5S KS""", # flush """4C 5C 9C 8C KC""", # flush """JH 8H AH KH QH""", # flush """3D 2H 3H 2C 2D""", # full house """2H 2C 3S 3H 3D""", # full house """KH KC 3S 3H 3D""", # full house """JC 6H JS JD JH""", # 4 of a kind """JC 7H JS JD JH""", # 4 of a kind """JC KH JS JD JH""", # 4 of a kind """2S AS 4S 5S 3S""", # straight flush (low ace) """2D 6D 3D 4D 5D""", # straight flush """5C 6C 3C 7C 4C""", # straight flush """JH 9H TH KH QH""", # straight flush """JH AH TH KH QH""", # royal flush (high ace straight flush) ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""), ("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""), ("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""), ("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""), ("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""), ("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""), ("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""), ("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""), ("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""), ("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""), ("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""), ("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""), ("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""), ("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""), ("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""), ("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""), ("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""), ("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""), ("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""), ("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""), ("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""), ("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""), ("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""), ("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""), ("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""), ("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""), ("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""), ("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""), ("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""), ("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""), ("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", True), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", 
False), ("""AS 3S 4S 8S 2S""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 3H 4H 5H 6H""", True), ("""AS AH 2H AD AC""", False), ("""2H 3H 5H 6H 7H""", False), ("""KS AS TS QS JS""", True), ("""8H 9H QS JS TH""", True), ) _SCREAMING_SNAKE_CASE = ( ("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]), ("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]), ("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]), ("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 0), ("""JH 9H TH KH QH""", 0), ("""JC KH JS JD JH""", 7), ("""KH KC 3S 3H 3D""", 6), ("""8C 9C 5C 3C TC""", 0), ("""JS QS 9H TS KH""", 0), ("""7C 7S KH 2H 7H""", 3), ("""3C KH 5D 5S KH""", 2), ("""QH 8H KD JH 8S""", 1), ("""2D 6D 9D TH 7D""", 0), ) _SCREAMING_SNAKE_CASE = ( ("""JH AH TH KH QH""", 2_3), ("""JH 9H TH KH QH""", 2_2), ("""JC KH JS JD JH""", 2_1), ("""KH KC 3S 3H 3D""", 2_0), ("""8C 9C 5C 3C TC""", 1_9), ("""JS QS 9H TS KH""", 1_8), ("""7C 7S KH 2H 7H""", 1_7), ("""3C KH 5D 5S KH""", 1_6), ("""QH 8H KD JH 8S""", 1_5), ("""2D 6D 9D TH 7D""", 1_4), ) def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase , UpperCamelCase = randrange(len(UpperCamelCase_ ) ), randrange(len(UpperCamelCase_ ) ) UpperCamelCase = ["""Loss""", """Tie""", """Win"""][(play >= oppo) + (play > oppo)] UpperCamelCase , UpperCamelCase = SORTED_HANDS[play], SORTED_HANDS[oppo] return hand, other, expected def lowercase( UpperCamelCase_ = 100 ) -> List[Any]: '''simple docstring''' return (generate_random_hand() for _ in range(UpperCamelCase_ )) @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_flush() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_straight() == expected @pytest.mark.parametrize("""hand, expected, card_values""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: '''simple docstring''' UpperCamelCase = PokerHand(UpperCamelCase_ ) assert player._is_five_high_straight() == expected assert player._card_values == card_values @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._is_same_kind() == expected @pytest.mark.parametrize("""hand, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' assert PokerHand(UpperCamelCase_ )._hand_type == expected @pytest.mark.parametrize("""hand, other, expected""" , UpperCamelCase_ ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected @pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> int: '''simple docstring''' assert PokerHand(UpperCamelCase_ ).compare_with(PokerHand(UpperCamelCase_ ) ) == expected def lowercase( ) -> Dict: '''simple docstring''' UpperCamelCase = [PokerHand(UpperCamelCase_ ) for hand in SORTED_HANDS] UpperCamelCase = poker_hands.copy() shuffle(UpperCamelCase_ ) UpperCamelCase = chain(sorted(UpperCamelCase_ ) ) for index, hand in enumerate(UpperCamelCase_ ): assert hand 
== poker_hands[index] def lowercase( ) -> Union[str, Any]: '''simple docstring''' # Test that five high straights are compared correctly. UpperCamelCase = [PokerHand("""2D AC 3H 4H 5S""" ), PokerHand("""2S 3H 4H 5S 6C""" )] pokerhands.sort(reverse=UpperCamelCase_ ) assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C" def lowercase( ) -> str: '''simple docstring''' # Multiple calls to five_high_straight function should still return True # and shouldn't mutate the list in every call other than the first. UpperCamelCase = PokerHand("""2C 4S AS 3D 5C""" ) UpperCamelCase = True UpperCamelCase = [5, 4, 3, 2, 14] for _ in range(10 ): assert pokerhand._is_five_high_straight() == expected assert pokerhand._card_values == expected_card_values def lowercase( ) -> int: '''simple docstring''' # Problem number 54 from Project Euler # Testing from poker_hands.txt file UpperCamelCase = 0 UpperCamelCase = os.path.abspath(os.path.dirname(UpperCamelCase_ ) ) UpperCamelCase = os.path.join(UpperCamelCase_ , """poker_hands.txt""" ) with open(UpperCamelCase_ ) as file_hand: for line in file_hand: UpperCamelCase = line[:14].strip() UpperCamelCase = line[15:].strip() UpperCamelCase , UpperCamelCase = PokerHand(UpperCamelCase_ ), PokerHand(UpperCamelCase_ ) UpperCamelCase = player.compare_with(UpperCamelCase_ ) if output == "Win": answer += 1 assert answer == 376
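# The PokerHand internals are not shown in this file, so this is an assumed
# sketch of two predicates the fixtures above exercise: parsing "RS"-style
# cards, flush detection (all suits equal), and plain straight detection
# (the low-ace A-2-3-4-5 case is handled separately in the real class).
RANKS = "23456789TJQKA"

def parse(hand: str):
    ranks = sorted((RANKS.index(card[0]) + 2 for card in hand.split()), reverse=True)
    suits = [card[1] for card in hand.split()]
    return ranks, suits

def is_flush(hand: str) -> bool:
    return len(set(parse(hand)[1])) == 1

def is_straight(hand: str) -> bool:
    ranks = parse(hand)[0]
    return all(a - b == 1 for a, b in zip(ranks, ranks[1:]))

print(is_flush("8C 9C 5C 3C TC"), is_straight("6S 8S 7S 5H 9H"))  # True True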
343
0
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty subsequences of `nums`.

    Elements need not be contiguous. Raises ValueError on an empty input.
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
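# Quick checks for the function above: because subsequences need not be
# contiguous, the best answer is the sum of the positive elements, or the
# single largest element when everything is negative.
assert max_subsequence_sum([1, 2, 3, 4, -2]) == 10
assert max_subsequence_sum([-2, -3, -1, -4, -6]) == -1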
63
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/config.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """xlnet""" __lowerCAmelCase = ["""mems"""] __lowerCAmelCase = { """n_token""": """vocab_size""", # Backward compatibility """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , lowerCamelCase_ : Any=3_2000 , lowerCamelCase_ : Dict=1024 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : Dict=16 , lowerCamelCase_ : List[Any]=4096 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Union[str, Any]="bi" , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=1E-12 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Union[str, Any]=512 , lowerCamelCase_ : Any=None , lowerCamelCase_ : str=True , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : Union[str, Any]="last" , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : str="tanh" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Dict=5 , lowerCamelCase_ : str=5 , lowerCamelCase_ : Optional[int]=5 , lowerCamelCase_ : Any=1 , lowerCamelCase_ : int=2 , **lowerCamelCase_ : List[Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = n_layer UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(f"""'d_model % n_head' ({d_model % n_head}) should be equal to 0""" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( f"""`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})""" ) UpperCamelCase = d_model // n_head UpperCamelCase = ff_activation UpperCamelCase = d_inner UpperCamelCase = untie_r UpperCamelCase = attn_type UpperCamelCase = initializer_range UpperCamelCase = layer_norm_eps UpperCamelCase = dropout UpperCamelCase = mem_len UpperCamelCase = reuse_len UpperCamelCase = bi_data UpperCamelCase = clamp_len UpperCamelCase = same_length UpperCamelCase = summary_type UpperCamelCase = summary_use_proj UpperCamelCase = summary_activation UpperCamelCase = summary_last_dropout UpperCamelCase = start_n_top UpperCamelCase = end_n_top UpperCamelCase = bos_token_id UpperCamelCase = pad_token_id UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( """The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`""" """ instead.""" , lowerCamelCase_ , ) UpperCamelCase = kwargs["""use_cache"""] UpperCamelCase = use_mems_eval UpperCamelCase = use_mems_train super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" ) return -1 @max_position_embeddings.setter def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" raise NotImplementedError( f"""The model {self.model_type} is one of the few models that 
has no sequence length limit.""" )
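# A standalone sketch of the head-dimension consistency rule enforced in the
# XLNet constructor above: d_model must split evenly across n_head, and any
# d_head passed explicitly must equal d_model // n_head.
def validate_head_dim(d_model: int, n_head: int, d_head=None) -> int:
    if d_model % n_head != 0:
        raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
    if d_head is not None and d_head != d_model // n_head:
        raise ValueError(f"`d_head` ({d_head}) should be equal to `d_model // n_head` ({d_model // n_head})")
    return d_model // n_head

print(validate_head_dim(1024, 16))  # 64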
343
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
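# The module above defers heavy imports until first attribute access. A toy,
# self-contained version of the same idea; the attribute-to-module mapping
# below is invented for illustration.
import importlib

class LazyModule:
    """Resolve attributes to objects in other modules on first access."""

    def __init__(self, attr_to_module: dict):
        self._attr_to_module = attr_to_module

    def __getattr__(self, name):
        try:
            module = importlib.import_module(self._attr_to_module[name])
        except KeyError:
            raise AttributeError(name) from None
        value = getattr(module, name)
        setattr(self, name, value)  # cache so __getattr__ fires only once per name
        return value

lazy = LazyModule({"sqrt": "math", "dumps": "json"})
print(lazy.sqrt(2.0))  # math is only imported here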
158
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
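# A bare-bones sketch of the register() behavior the test above exercises:
# a mapping from config classes to extractor classes that refuses to
# overwrite an existing entry. All class names here are invented.
class ExtractorRegistry:
    def __init__(self):
        self._mapping = {}

    def register(self, config_cls, extractor_cls):
        if config_cls in self._mapping:
            raise ValueError(f"{config_cls.__name__} is already registered")
        self._mapping[config_cls] = extractor_cls

    def for_config(self, config):
        return self._mapping[type(config)]

class CustomConfig: ...
class CustomFeatureExtractor: ...

registry = ExtractorRegistry()
registry.register(CustomConfig, CustomFeatureExtractor)
print(registry.for_config(CustomConfig()).__name__)  # CustomFeatureExtractor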
343
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : List[str] = { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json", # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): lowercase__ = "gpt_neox" def __init__( self : Tuple , lowerCAmelCase_ : Tuple=5_0_4_3_2 , lowerCAmelCase_ : Optional[Any]=6_1_4_4 , lowerCAmelCase_ : Any=4_4 , lowerCAmelCase_ : List[str]=6_4 , lowerCAmelCase_ : int=2_4_5_7_6 , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Any=0.25 , lowerCAmelCase_ : List[Any]=1_0_0_0_0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=2_0_4_8 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Optional[Any]=1E-5 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Optional[int]=0 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : int , ): """simple docstring""" super().__init__(bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_) lowercase_ = vocab_size lowercase_ = max_position_embeddings lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = rotary_pct lowercase_ = rotary_emb_base lowercase_ = attention_dropout lowercase_ = hidden_dropout lowercase_ = classifier_dropout lowercase_ = initializer_range lowercase_ = layer_norm_eps lowercase_ = use_cache lowercase_ = tie_word_embeddings lowercase_ = use_parallel_residual lowercase_ = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( """The hidden size is not divisble by the number of attention heads! Make sure to update them!""") def _UpperCAmelCase ( self : Any): """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , lowerCamelCase_) or len(self.rope_scaling) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ F'''got {self.rope_scaling}''') lowercase_ = self.rope_scaling.get("""type""" , lowerCamelCase_) lowercase_ = self.rope_scaling.get("""factor""" , lowerCamelCase_) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''') if rope_scaling_factor is None or not isinstance(lowerCamelCase_ , lowerCamelCase_) or rope_scaling_factor <= 1.0: raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''')
136
import argparse
import json
from typing import List

from ltp import LTP

from transformers import BertTokenizer


def lowercase( UpperCamelCase_ ) -> List[Any]:
    '''simple docstring'''
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF)
        or (cp >= 0X3400 and cp <= 0X4DBF)  #
        or (cp >= 0X2_0000 and cp <= 0X2_A6DF)  #
        or (cp >= 0X2_A700 and cp <= 0X2_B73F)  #
        or (cp >= 0X2_B740 and cp <= 0X2_B81F)  #
        or (cp >= 0X2_B820 and cp <= 0X2_CEAF)  #
        or (cp >= 0XF900 and cp <= 0XFAFF)
        or (cp >= 0X2_F800 and cp <= 0X2_FA1F)  #
    ):  #
        return True

    return False


def lowercase( UpperCamelCase_ ) -> Dict:
    '''simple docstring'''
    # word like '180' or '身高' or '神'
    for char in word:
        UpperCamelCase = ord(UpperCamelCase_ )
        if not _is_chinese_char(UpperCamelCase_ ):
            return 0
    return 1


def lowercase( UpperCamelCase_ ) -> List[Any]:
    '''simple docstring'''
    UpperCamelCase = set()
    for token in tokens:
        UpperCamelCase = len(UpperCamelCase_ ) > 1 and is_chinese(UpperCamelCase_ )
        if chinese_word:
            word_set.add(UpperCamelCase_ )
    UpperCamelCase = list(UpperCamelCase_ )
    return word_list


def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    UpperCamelCase = max([len(UpperCamelCase_ ) for w in chinese_word_set] )

    UpperCamelCase = bert_tokens
    UpperCamelCase , UpperCamelCase = 0, len(UpperCamelCase_ )
    while start < end:
        UpperCamelCase = True
        if is_chinese(bert_word[start] ):
            UpperCamelCase = min(end - start , UpperCamelCase_ )
            for i in range(UpperCamelCase_ , 1 , -1 ):
                UpperCamelCase = """""".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        UpperCamelCase = """##""" + bert_word[j]
                    UpperCamelCase = start + i
                    UpperCamelCase = False
                    break
        if single_word:
            start += 1
    return bert_word


def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
    '''simple docstring'''
    UpperCamelCase = []
    for i in range(0 , len(UpperCamelCase_ ) , 100 ):
        UpperCamelCase = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        UpperCamelCase = [get_chinese_word(UpperCamelCase_ ) for r in res]
        ltp_res.extend(UpperCamelCase_ )
    assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )

    UpperCamelCase = []
    for i in range(0 , len(UpperCamelCase_ ) , 100 ):
        UpperCamelCase = bert_tokenizer(lines[i : i + 100] , add_special_tokens=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=512 )
        bert_res.extend(res["""input_ids"""] )
    assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )

    UpperCamelCase = []
    for input_ids, chinese_word in zip(UpperCamelCase_ , UpperCamelCase_ ):
        UpperCamelCase = []
        for id in input_ids:
            UpperCamelCase = bert_tokenizer._convert_id_to_token(UpperCamelCase_ )
            input_tokens.append(UpperCamelCase_ )
        UpperCamelCase = add_sub_symbol(UpperCamelCase_ , UpperCamelCase_ )
        UpperCamelCase = []
        # We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
        for i, token in enumerate(UpperCamelCase_ ):
            if token[:2] == "##":
                UpperCamelCase = token[2:]
                # save chinese tokens' pos
                if len(UpperCamelCase_ ) == 1 and _is_chinese_char(ord(UpperCamelCase_ ) ):
                    ref_id.append(UpperCamelCase_ )
        ref_ids.append(UpperCamelCase_ )

    assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )

    return ref_ids


def lowercase( UpperCamelCase_ ) -> List[Any]:
    '''simple docstring'''
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
        UpperCamelCase = f.readlines()
    UpperCamelCase = [line.strip() for line in data if len(UpperCamelCase_ ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    UpperCamelCase = LTP(args.ltp )  # faster on GPU
    UpperCamelCase = BertTokenizer.from_pretrained(args.bert )

    UpperCamelCase = prepare_ref(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

    with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
        UpperCamelCase = [json.dumps(UpperCamelCase_ ) + """\n""" for ref in ref_ids]
        f.writelines(UpperCamelCase_ )


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE = argparse.ArgumentParser(description="""prepare_chinese_ref""")
    parser.add_argument(
        """--file_name""",
        type=str,
        default="""./resources/chinese-demo.txt""",
        help="""file need process, same as training data in lm""",
    )
    parser.add_argument(
        """--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
    )
    parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
    parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")

    _SCREAMING_SNAKE_CASE = parser.parse_args()
    main(args)
343
0
"""simple docstring""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() a = logging.get_logger(__name__) a = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def _snake_case ( _snake_case : int ) -> List[str]: '''simple docstring''' if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _A = k.replace(UpperCamelCase_ , UpperCamelCase_ ) if k.startswith('encoder' ): _A = k.replace('.attn' , '.self_attn' ) _A = k.replace('norm1' , 'self_attn_layer_norm' ) _A = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): _A = k.replace('norm1' , 'self_attn_layer_norm' ) _A = k.replace('norm2' , 'encoder_attn_layer_norm' ) _A = k.replace('norm3' , 'final_layer_norm' ) return k def _snake_case ( _snake_case : Any ) -> Tuple: '''simple docstring''' _A = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: _A = sd.pop(UpperCamelCase_ ) _A = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd _A = v a = ['''START'''] @torch.no_grad() def _snake_case ( _snake_case : Tuple , _snake_case : List[str] , _snake_case : int ) -> int: '''simple docstring''' _A = torch.load(UpperCamelCase_ , map_location='cpu' ) _A = model['model'] _A = BlenderbotConfig.from_json_file(UpperCamelCase_ ) _A = BlenderbotForConditionalGeneration(UpperCamelCase_ ) _A = m.model.state_dict().keys() _A = [] _A = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _A = rename_state_dict_key(UpperCamelCase_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _A = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(UpperCamelCase_ ) m.model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ ) m.half() m.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) a = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
315
import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : str=2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[str]=32 , lowerCamelCase_ : Union[str, Any]=5 , lowerCamelCase_ : Optional[Any]=4 , lowerCamelCase_ : Any=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : str=0.1 , lowerCamelCase_ : Union[str, Any]=10 , lowerCamelCase_ : Optional[Any]=0.0_2 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = num_patches + 1 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, pixel_values def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = FlaxViTModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) UpperCamelCase = (self.image_size, self.image_size) UpperCamelCase = (self.patch_size, self.patch_size) UpperCamelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ): """simple docstring""" UpperCamelCase = self.type_sequence_label_size UpperCamelCase = FlaxViTForImageClassification(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = FlaxViTForImageClassification(lowerCamelCase_ ) 
UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxViTModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ : Any , **lowerCamelCase_ : Any ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest("""JIT Enabled""" ): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): UpperCamelCase = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""google/vit-base-patch16-224""" ) UpperCamelCase = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(lowerCamelCase_ )
343
0
from __future__ import annotations def a_ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : List[Any] = None ): '''simple docstring''' if start is None: _lowerCamelCase : List[str] =0 if end is None: _lowerCamelCase : Tuple =len(UpperCamelCase_ ) - 1 if start >= end: return _lowerCamelCase : Optional[Any] =(start + end) // 2 slowsort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) slowsort(UpperCamelCase_ , mid + 1 , UpperCamelCase_ ) if sequence[end] < sequence[mid]: _lowerCamelCase , _lowerCamelCase : Union[str, Any] =sequence[mid], sequence[end] slowsort(UpperCamelCase_ , UpperCamelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
199
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : str=13 , lowerCamelCase_ : Any=7 , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : Dict=99 , lowerCamelCase_ : str=24 , lowerCamelCase_ : Optional[int]=2 , lowerCamelCase_ : List[str]=6 , lowerCamelCase_ : List[Any]=37 , lowerCamelCase_ : int="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any=512 , lowerCamelCase_ : List[Any]=16 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=1000 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_input_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = range_bbox def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase = bbox[i, j, 3] UpperCamelCase = bbox[i, j, 1] UpperCamelCase = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase = bbox[i, j, 2] UpperCamelCase = bbox[i, j, 0] UpperCamelCase = t UpperCamelCase = None if self.use_input_mask: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = None UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ , token_type_ids=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , bbox=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = LiltForTokenClassification(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , ): """simple docstring""" UpperCamelCase = LiltForQuestionAnswering(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model( lowerCamelCase_ , bbox=lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) = config_and_inputs UpperCamelCase = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __lowerCAmelCase = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, 
"""zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict ): """simple docstring""" return True def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = LiltModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Any ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase = type self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = LiltModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowerCamelCase_ ) UpperCamelCase = torch.tensor([[1, 2]] , device=lowerCamelCase_ ) UpperCamelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(input_ids=lowerCamelCase_ , bbox=lowerCamelCase_ ) UpperCamelCase = torch.Size([1, 2, 768] ) UpperCamelCase = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=lowerCamelCase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowerCamelCase_ , atol=1E-3 ) )
343
0
"""simple docstring""" import importlib import shutil import threading import warnings from typing import List import fsspec import fsspec.asyn from . import compression from .hffilesystem import HfFileSystem SCREAMING_SNAKE_CASE : Union[str, Any] = importlib.util.find_spec("""s3fs""") is not None if _has_safs: from .safilesystem import SaFileSystem # noqa: F401 SCREAMING_SNAKE_CASE : List[str] = [ compression.BzaFileSystem, compression.GzipFileSystem, compression.LzaFileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(F'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.') fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) def lowercase ( _snake_case : str ) ->str: """simple docstring""" if "://" in dataset_path: __snake_case : Dict = dataset_path.split('''://''' )[1] return dataset_path def lowercase ( _snake_case : Tuple ) ->bool: """simple docstring""" if fs is not None and fs.protocol != "file": return True else: return False def lowercase ( _snake_case : Optional[int] , _snake_case : str , _snake_case : str ) ->int: """simple docstring""" __snake_case : str = not is_remote_filesystem(UpperCamelCase_ ) if is_local: # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(UpperCamelCase_ ) , fs._strip_protocol(UpperCamelCase_ ) ) else: fs.mv(UpperCamelCase_ , UpperCamelCase_ , recursive=UpperCamelCase_ ) def lowercase ( ) ->None: """simple docstring""" if hasattr(fsspec.asyn , '''reset_lock''' ): # for future fsspec>2022.05.0 fsspec.asyn.reset_lock() else: __snake_case : Union[str, Any] = None __snake_case : List[str] = None __snake_case : Tuple = threading.Lock()
102
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : str , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict=7 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Any=30 , lowerCamelCase_ : str=400 , lowerCamelCase_ : str=True , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=0.9 , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase_ : Any=[0.5, 0.5, 0.5] , ): """simple docstring""" UpperCamelCase = size if size is not None else {"""shortest_edge""": 30} UpperCamelCase = crop_size if crop_size is not None else {"""height""": 30, """width""": 30} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize_and_center_crop UpperCamelCase = size UpperCamelCase = crop_pct UpperCamelCase = crop_size UpperCamelCase = do_normalize UpperCamelCase = image_mean UpperCamelCase = image_std def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = PoolFormerImageProcessingTester(self ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize_and_center_crop""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """crop_pct""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """image_std""" ) ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 30} ) self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , numpify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase_ , torchify=lowerCamelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched UpperCamelCase = image_processing(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
343
0
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params SCREAMING_SNAKE_CASE : Dict = getLogger(__name__) SCREAMING_SNAKE_CASE : int = "cuda" if torch.cuda.is_available() else "cpu" def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 8 , lowerCamelCase_ = DEFAULT_DEVICE , lowerCamelCase_=False , lowerCamelCase_="summarization" , lowerCamelCase_=None , **lowerCamelCase_ , ) -> Dict: _lowercase : Any = Path(UpperCamelCase_ ).open('w' , encoding='utf-8' ) _lowercase : Tuple = str(UpperCamelCase_ ) _lowercase : Dict = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ).to(UpperCamelCase_ ) if fpaa: _lowercase : List[str] = model.half() _lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type. _lowercase : Tuple = time.time() # update config with task specific params use_task_specific_params(UpperCamelCase_ , UpperCamelCase_ ) if prefix is None: _lowercase : List[Any] = prefix or getattr(model.config , 'prefix' , '' ) or '' for examples_chunk in tqdm(list(chunks(UpperCamelCase_ , UpperCamelCase_ ) ) ): _lowercase : List[Any] = [prefix + text for text in examples_chunk] _lowercase : str = tokenizer(UpperCamelCase_ , return_tensors='pt' , truncation=UpperCamelCase_ , padding='longest' ).to(UpperCamelCase_ ) _lowercase : Optional[int] = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **UpperCamelCase_ , ) _lowercase : str = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) for hypothesis in dec: fout.write(hypothesis + '\n' ) fout.flush() fout.close() _lowercase : Union[str, Any] = int(time.time() - start_time ) # seconds _lowercase : Union[str, Any] = len(UpperCamelCase_ ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def UpperCamelCase_( ) -> List[Any]: return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' ) def UpperCamelCase_( lowerCamelCase_=True ) -> Any: _lowercase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('model_name' , type=UpperCamelCase_ , help='like facebook/bart-large-cnn,t5-base, etc.' ) parser.add_argument('input_path' , type=UpperCamelCase_ , help='like cnn_dm/test.source' ) parser.add_argument('save_path' , type=UpperCamelCase_ , help='where to save summaries' ) parser.add_argument('--reference_path' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='like cnn_dm/test.target' ) parser.add_argument('--score_path' , type=UpperCamelCase_ , required=UpperCamelCase_ , default='metrics.json' , help='where to save metrics' ) parser.add_argument('--device' , type=UpperCamelCase_ , required=UpperCamelCase_ , default=UpperCamelCase_ , help='cuda, cuda:1, cpu etc.' 
    )
    parser.add_argument(
        '--prefix' , type=UpperCamelCase_ , required=UpperCamelCase_ , default=UpperCamelCase_ , help='will be added to the beginning of src examples' )
    parser.add_argument('--task' , type=UpperCamelCase_ , default='summarization' , help='used for task_specific_params + metrics' )
    parser.add_argument('--bs' , type=UpperCamelCase_ , default=8 , required=UpperCamelCase_ , help='batch size' )
    parser.add_argument(
        '--n_obs' , type=UpperCamelCase_ , default=-1 , required=UpperCamelCase_ , help='How many observations. Defaults to all.' )
    parser.add_argument('--fp16' , action='store_true' )
    parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
    parser.add_argument(
        '--info' , nargs='?' , type=UpperCamelCase_ , const=datetime_now() , help=( 'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.' ' lang=en-ru. If no value is passed, the current datetime string will be used.' ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    _lowercase , _lowercase : str = parser.parse_known_args()
    _lowercase : List[Any] = parse_numeric_n_bool_cl_kwargs(UpperCamelCase_ )
    if parsed_args and verbose:
        print(F'''parsed the following generate kwargs: {parsed_args}''' )
    _lowercase : Tuple = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        _lowercase : List[str] = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=UpperCamelCase_ )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu' )
    _lowercase : List[str] = generate_summaries_or_translations(
        UpperCamelCase_ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **UpperCamelCase_ , )
    if args.reference_path is None:
        return {}
    # Compute scores
    _lowercase : Optional[Any] = calculate_bleu if 'translation' in args.task else calculate_rouge
    _lowercase : List[Any] = [x.rstrip() for x in open(args.save_path ).readlines()]
    _lowercase : str = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(UpperCamelCase_ )]
    _lowercase : Dict = score_fn(UpperCamelCase_ , UpperCamelCase_ )
    scores.update(UpperCamelCase_ )
    if args.dump_args:
        scores.update(UpperCamelCase_ )
    if args.info:
        _lowercase : List[str] = args.info
    if verbose:
        print(UpperCamelCase_ )
    if args.score_path is not None:
        json.dump(UpperCamelCase_ , open(args.score_path , 'w' ) )
    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
21
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> float: '''simple docstring''' if mass < 0: raise ValueError("""The mass of a body cannot be negative""" ) return 0.5 * mass * abs(UpperCamelCase_ ) * abs(UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
343
0
"""simple docstring""" import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : str ) -> List[str]: '''simple docstring''' __snake_case : str = torch.load(UpperCamelCase_ , map_location='cpu' ) __snake_case : Tuple = chkpt['model'] # We have the base model one level deeper than the original XLM repository __snake_case : Optional[Any] = {} for k, v in state_dict.items(): if "pred_layer" in k: __snake_case : Optional[int] = v else: __snake_case : Any = v __snake_case : Any = chkpt['params'] __snake_case : Union[str, Any] = {n: v for n, v in config.items() if not isinstance(UpperCamelCase_ , (torch.FloatTensor, numpy.ndarray) )} __snake_case : Union[str, Any] = chkpt['dico_word2id'] __snake_case : List[Any] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()} # Save pytorch-model __snake_case : Optional[Any] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME __snake_case : Optional[int] = pytorch_dump_folder_path + '/' + CONFIG_NAME __snake_case : List[str] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file'] print(F"Save PyTorch model to {pytorch_weights_dump_path}" ) torch.save(UpperCamelCase_ , UpperCamelCase_ ) print(F"Save configuration file to {pytorch_config_dump_path}" ) with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase_ , indent=2 ) + '\n' ) print(F"Save vocab file to {pytorch_config_dump_path}" ) with open(UpperCamelCase_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(UpperCamelCase_ , indent=2 ) + '\n' ) if __name__ == "__main__": _a : Optional[int]= argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _a : Dict= parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
172
from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/trocr-base-handwritten""": ( """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json""" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = """trocr""" __lowerCAmelCase = ["""past_key_values"""] __lowerCAmelCase = { """num_attention_heads""": """decoder_attention_heads""", """hidden_size""": """d_model""", """num_hidden_layers""": """decoder_layers""", } def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int]=5_0265 , lowerCamelCase_ : Optional[int]=1024 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=4096 , lowerCamelCase_ : Tuple="gelu" , lowerCamelCase_ : List[str]=512 , lowerCamelCase_ : Union[str, Any]=0.1 , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Tuple=0.0_2 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : List[str]=1 , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : List[Any]=2 , **lowerCamelCase_ : Union[str, Any] , ): """simple docstring""" UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_ffn_dim UpperCamelCase = activation_function UpperCamelCase = max_position_embeddings UpperCamelCase = dropout UpperCamelCase = attention_dropout UpperCamelCase = activation_dropout UpperCamelCase = init_std UpperCamelCase = decoder_layerdrop UpperCamelCase = use_cache UpperCamelCase = scale_embedding UpperCamelCase = use_learned_position_embeddings UpperCamelCase = layernorm_embedding super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
343
0
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCamelCase_ , ) assert hasattr(self , '''env''' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Optional[int] = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings A : List[Any] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase_ , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version='''py36''' , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Union[str, Any] = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe A : List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) A : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 ) ) # assert kpis assert 
train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCamelCase_ )
3
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE = { """microsoft/swin-tiny-patch4-window7-224""": ( """https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json""" ), # See all Swin models at https://huggingface.co/models?filter=swin } class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase ): __lowerCAmelCase = """swin""" __lowerCAmelCase = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Any , lowerCamelCase_ : Optional[int]=224 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[Any]=96 , lowerCamelCase_ : int=[2, 2, 6, 2] , lowerCamelCase_ : Dict=[3, 6, 12, 24] , lowerCamelCase_ : str=7 , lowerCamelCase_ : Tuple=4.0 , lowerCamelCase_ : str=True , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Tuple=0.1 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=0.0_2 , lowerCamelCase_ : str=1E-5 , lowerCamelCase_ : Union[str, Any]=32 , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=None , **lowerCamelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**lowerCamelCase_ ) UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = embed_dim UpperCamelCase = depths UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = num_heads UpperCamelCase = window_size UpperCamelCase = mlp_ratio UpperCamelCase = qkv_bias UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = drop_path_rate UpperCamelCase = hidden_act UpperCamelCase = use_absolute_embeddings UpperCamelCase = layer_norm_eps UpperCamelCase = initializer_range UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model UpperCamelCase = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) ) UpperCamelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(lowerCamelCase_ ) + 1 )] UpperCamelCase , UpperCamelCase = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names ) class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = version.parse("""1.11""" ) @property def lowerCamelCase_ ( self : int ): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase_ ( self : Tuple ): """simple docstring""" return 1E-4
343
0
'''simple docstring''' def lowercase_ ( _lowercase , _lowercase ) -> int: '''simple docstring''' assert x is not None assert y is not None lowerCamelCase_ : List[str] = len(UpperCamelCase_ ) lowerCamelCase_ : Any = len(UpperCamelCase_ ) # declaring the array for storing the dp values lowerCamelCase_ : Optional[Any] = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1 , m + 1 ): for j in range(1 , n + 1 ): lowerCamelCase_ : List[Any] = 1 if x[i - 1] == y[j - 1] else 0 lowerCamelCase_ : Optional[Any] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match ) lowerCamelCase_ : Any = '''''' lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = m, n while i > 0 and j > 0: lowerCamelCase_ : str = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: lowerCamelCase_ : int = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": __lowercase : int = '''AGGTAB''' __lowercase : Optional[Any] = '''GXTXAYB''' __lowercase : Any = 4 __lowercase : Optional[int] = '''GTAB''' __lowercase , __lowercase : Optional[Any] = longest_common_subsequence(a, b) print('''len =''', ln, ''', sub-sequence =''', subseq) import doctest doctest.testmod()
318
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _SCREAMING_SNAKE_CASE = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=1_2_8, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5) _SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) _SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(6_4, 6_4) ) _SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image) _SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0) _SCREAMING_SNAKE_CASE = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _SCREAMING_SNAKE_CASE = """Normal""" if result[0][0] == 1: _SCREAMING_SNAKE_CASE = """Abnormality detected"""
343
0
import numpy as np def __lowerCAmelCase ( a__ ) -> np.ndarray: return 1 / (1 + np.exp(-vector )) def __lowerCAmelCase ( a__ ) -> np.ndarray: return vector * sigmoid(UpperCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
6
from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): pass class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , lowerCamelCase_ : Any ): """simple docstring""" UpperCamelCase = data UpperCamelCase = None def __iter__( self : Optional[int] ): """simple docstring""" UpperCamelCase = self UpperCamelCase = [] while node: if node in visited: raise ContainsLoopError visited.append(lowerCamelCase_ ) yield node.data UpperCamelCase = node.next_node @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": _SCREAMING_SNAKE_CASE = Node(1) _SCREAMING_SNAKE_CASE = Node(2) _SCREAMING_SNAKE_CASE = Node(3) _SCREAMING_SNAKE_CASE = Node(4) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = root_node.next_node print(root_node.has_loop) # True _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) _SCREAMING_SNAKE_CASE = Node(5) _SCREAMING_SNAKE_CASE = Node(6) print(root_node.has_loop) # False _SCREAMING_SNAKE_CASE = Node(1) print(root_node.has_loop) # False
343
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ : List[Any] = logging.get_logger(__name__) lowerCAmelCase_ : Union[str, Any] = { 'google/bit-50': 'https://huggingface.co/google/bit-50/resolve/main/config.json', } class __SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __a ='bit' __a =['preactivation', 'bottleneck'] __a =['SAME', 'VALID'] def __init__( self : Optional[int] , __a : Union[str, Any]=3 , __a : Tuple=64 , __a : Union[str, Any]=[2_56, 5_12, 10_24, 20_48] , __a : str=[3, 4, 6, 3] , __a : str="preactivation" , __a : Dict="relu" , __a : Any=None , __a : Optional[Any]=32 , __a : Dict=0.0 , __a : List[Any]=False , __a : Any=32 , __a : int=1 , __a : Any=None , __a : str=None , **__a : Optional[Any] , ): super().__init__(**lowerCamelCase_ ) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: _a = global_padding.upper() else: raise ValueError(f'Padding strategy {global_padding} not supported' ) _a = num_channels _a = embedding_size _a = hidden_sizes _a = depths _a = layer_type _a = hidden_act _a = global_padding _a = num_groups _a = drop_path_rate _a = embedding_dynamic_padding _a = output_stride _a = width_factor _a = ["stem"] + [f'stage{idx}' for idx in range(1 , len(lowerCamelCase_ ) + 1 )] _a , _a = get_aligned_output_features_output_indices( out_features=lowerCamelCase_ , out_indices=lowerCamelCase_ , stage_names=self.stage_names )
63
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue_model_parallelism.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """roberta-large""", """instance_type""": """ml.p3dn.24xlarge""", """results""": {"""train_runtime""": 1_600, """eval_accuracy""": 0.3, """eval_loss""": 1.2}, }, ] ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=lowerCamelCase_ , ) assert hasattr(self , """env""" ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = { """enabled""": True, """processes_per_host""": 8, } UpperCamelCase = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } UpperCamelCase = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} UpperCamelCase = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase_ , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 500, } , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase_ , py_version="""py36""" , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any] ): """simple docstring""" TrainingJobAnalytics(lowerCamelCase_ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(1,)] ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" UpperCamelCase = self.create_estimator(lowerCamelCase_ ) # run training estimator.fit() # result dataframe UpperCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) UpperCamelCase = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCamelCase = ( Session().describe_training_job(estimator.latest_training_job.name 
).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , lowerCamelCase_ )
343
0
'''simple docstring''' from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( __lowerCAmelCase ): __lowerCamelCase : Tuple = ["image_processor", "tokenizer"] __lowerCamelCase : Dict = "BlipImageProcessor" __lowerCamelCase : int = ("BertTokenizer", "BertTokenizerFast") def __init__( self , _lowerCAmelCase , _lowerCAmelCase ) -> Dict: _lowerCAmelCase = False super().__init__(lowerCamelCase_ , lowerCamelCase_ ) _lowerCAmelCase = self.image_processor def __call__( self , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = False , _lowerCAmelCase = True , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> List[str]: if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: _lowerCAmelCase = self.tokenizer _lowerCAmelCase = self.tokenizer( text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , ) return text_encoding # add pixel_values _lowerCAmelCase = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ ) if text is not None: _lowerCAmelCase = self.tokenizer( text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=lowerCamelCase_ , stride=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , return_special_tokens_mask=lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ , return_length=lowerCamelCase_ , verbose=lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ , ) else: _lowerCAmelCase = None if text_encoding is not None: encoding_image_processor.update(lowerCamelCase_ ) return encoding_image_processor def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Union[str, Any]: return self.tokenizer.batch_decode(*lowerCamelCase_ , **lowerCamelCase_ ) def _snake_case ( self , *_lowerCAmelCase , **_lowerCAmelCase ) -> Optional[Any]: return self.tokenizer.decode(*lowerCamelCase_ , **lowerCamelCase_ ) @property def _snake_case ( self ) -> Any: _lowerCAmelCase = self.tokenizer.model_input_names _lowerCAmelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
158
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
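A minimal sketch of what the lazy module buys, assuming the usual transformers package layout: the heavy torch/tf imports run only when an attribute is first accessed on the package.

# illustrative only; assumes transformers is installed
from transformers.models import convnext

print(convnext.ConvNextConfig.__name__)  # first access triggers the real import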
343
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ): lowercase__ = "sew-d" def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[Any]=3_2 , lowerCAmelCase_ : str=7_6_8 , lowerCAmelCase_ : Dict=1_2 , lowerCAmelCase_ : str=1_2 , lowerCAmelCase_ : str=3_0_7_2 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : str=5_1_2 , lowerCAmelCase_ : Union[str, Any]=2_5_6 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Any=("p2c", "c2p") , lowerCAmelCase_ : Any="layer_norm" , lowerCAmelCase_ : Optional[Any]="gelu_python" , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Union[str, Any]=0.1 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.02 , lowerCAmelCase_ : Optional[Any]=1E-7 , lowerCAmelCase_ : Optional[int]=1E-5 , lowerCAmelCase_ : Union[str, Any]="group" , lowerCAmelCase_ : Optional[Any]="gelu" , lowerCAmelCase_ : int=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase_ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Optional[Any]=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=1_2_8 , lowerCAmelCase_ : Tuple=1_6 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[Any]=0.05 , lowerCAmelCase_ : Dict=1_0 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Tuple=0.0 , lowerCAmelCase_ : Tuple=1_0 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : Any="mean" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=2_5_6 , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : str=1 , lowerCAmelCase_ : List[Any]=2 , **lowerCAmelCase_ : int , ): """simple docstring""" super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_) lowercase_ = hidden_size lowercase_ = feat_extract_norm lowercase_ = feat_extract_activation lowercase_ = list(lowerCamelCase_) lowercase_ = list(lowerCamelCase_) lowercase_ = list(lowerCamelCase_) lowercase_ = conv_bias lowercase_ = num_conv_pos_embeddings lowercase_ = num_conv_pos_embedding_groups lowercase_ = len(self.conv_dim) lowercase_ = num_hidden_layers lowercase_ = intermediate_size lowercase_ = squeeze_factor lowercase_ = max_position_embeddings lowercase_ = position_buckets lowercase_ = share_att_key lowercase_ = relative_attention lowercase_ = norm_rel_ebd lowercase_ = list(lowerCamelCase_) lowercase_ = hidden_act lowercase_ = num_attention_heads lowercase_ = hidden_dropout lowercase_ = attention_dropout lowercase_ = activation_dropout lowercase_ = feat_proj_dropout lowercase_ = final_dropout lowercase_ = layer_norm_eps lowercase_ = feature_layer_norm_eps lowercase_ = initializer_range lowercase_ = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that 
`len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase_ = apply_spec_augment lowercase_ = mask_time_prob lowercase_ = mask_time_length lowercase_ = mask_time_min_masks lowercase_ = mask_feature_prob lowercase_ = mask_feature_length lowercase_ = mask_feature_min_masks # ctc loss lowercase_ = ctc_loss_reduction lowercase_ = ctc_zero_infinity # sequence classification lowercase_ = use_weighted_layer_sum lowercase_ = classifier_proj_size @property def _UpperCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
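A small sketch of the stride-product property at the end of the config above, assuming the default conv_stride tuple: the overall downsampling factor of the convolutional feature extractor is simply the product of the strides.

# illustrative recomputation of the property's return value
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # default from the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320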
136
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = ShapEPipeline __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = ["""prompt"""] __lowerCAmelCase = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] __lowerCAmelCase = False @property def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return 32 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return self.time_input_dim * 4 @property def lowerCamelCase_ ( self : str ): """simple docstring""" return 8 @property def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase_ ( self : Dict ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(lowerCamelCase_ ) @property def lowerCamelCase_ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """num_attention_heads""": 2, """attention_head_dim""": 16, """embedding_dim""": self.time_input_dim, """num_embeddings""": 32, """embedding_proj_dim""": self.text_embedder_hidden_size, """time_embed_dim""": self.time_embed_dim, """num_layers""": 1, """clip_embed_dim""": self.time_input_dim * 2, """additional_embeddings""": 0, """time_embed_act_fn""": """gelu""", """norm_in_type""": """layer""", """encoder_hid_proj_type""": None, """added_emb_type""": None, } UpperCamelCase = PriorTransformer(**lowerCamelCase_ ) return model @property def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" torch.manual_seed(0 ) UpperCamelCase = { """param_shapes""": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), """d_latent""": self.time_input_dim, """d_hidden""": self.renderer_dim, """n_output""": 12, """background""": ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**lowerCamelCase_ ) return model def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_text_encoder UpperCamelCase = self.dummy_tokenizer UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowerCamelCase_ , clip_sample=lowerCamelCase_ , clip_sample_range=1.0 , ) UpperCamelCase = { """prior""": prior, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """renderer""": renderer, """scheduler""": scheduler, } return components def lowerCamelCase_ ( self : int , 
lowerCamelCase_ : Any , lowerCamelCase_ : Union[str, Any]=0 ): """simple docstring""" if str(lowerCamelCase_ ).startswith("""mps""" ): UpperCamelCase = torch.manual_seed(lowerCamelCase_ ) else: UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) UpperCamelCase = { """prompt""": """horse""", """generator""": generator, """num_inference_steps""": 1, """frame_size""": 32, """output_type""": """np""", } return inputs def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = """cpu""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self : Tuple ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = torch_device == """cpu""" UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=lowerCamelCase_ , relax_max_difference=lowerCamelCase_ , ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**lowerCamelCase_ ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(lowerCamelCase_ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**lowerCamelCase_ , num_images_per_prompt=lowerCamelCase_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/test_shap_e_np_out.npy""" ) UpperCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" ) UpperCamelCase = pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) UpperCamelCase = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 ) UpperCamelCase = pipe( """a shark""" , generator=lowerCamelCase_ , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
343
0
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowercase_ : '''simple docstring''' def __init__( self : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict=13 , _UpperCAmelCase : str=30 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=3 , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=True , _UpperCAmelCase : Tuple=32 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : int=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : List[Any]=0.6 , _UpperCAmelCase : Optional[Any]=None , ): _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = mask_ratio _A = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : List[Any] ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] ): _A = TFViTMAEModel(config=lowerCamelCase_ ) _A = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] , 
_UpperCAmelCase : List[str] , _UpperCAmelCase : str ): _A = TFViTMAEForPreTraining(lowerCamelCase_ ) _A = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches _A = (self.image_size // self.patch_size) ** 2 _A = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _A = 1 _A = TFViTMAEForPreTraining(lowerCamelCase_ ) _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(lowerCamelCase_ , training=lowerCamelCase_ ) _A = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCAmelCase_ ( self : Dict ): _A = self.prepare_config_and_inputs() ((_A) , (_A) , (_A)) = config_and_inputs _A = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : str = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () UpperCAmelCase : int = {'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} UpperCAmelCase : Optional[int] = False UpperCAmelCase : Optional[int] = False UpperCAmelCase : Optional[int] = False UpperCAmelCase : List[Any] = False def lowerCAmelCase_ ( self : List[Any] ): _A = TFViTMAEModelTester(self ) _A = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCAmelCase_ ( self : Optional[int] ): self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def lowerCAmelCase_ ( self : str ): pass def lowerCAmelCase_ ( self : Tuple ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) _A = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCAmelCase_ ( self : str ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCAmelCase_ ( self : int ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCAmelCase_ ( self : Dict ): np.random.seed(2 ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = int((config.image_size // config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) _A = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) _A = model(lowerCamelCase_ , noise=lowerCamelCase_ ) _A = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) _A = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) _A = outputs_dict[0].numpy() _A = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) 
def lowerCAmelCase_ ( self : List[Any] ): np.random.seed(2 ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = int((config.image_size // config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_UpperCAmelCase : List[Any] ): _A = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): _A = v.numpy() else: _A = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) _A = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) _A = prepare_numpy_arrays(lowerCamelCase_ ) _A = model(lowerCamelCase_ , noise=lowerCamelCase_ ) _A = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ): np.random.seed(2 ) _A = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _A = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _A = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCAmelCase_ ( self : Dict ): np.random.seed(2 ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , '_keras_serializable' , lowerCamelCase_ ) } _A = int((config.image_size // config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _A = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: _A = main_layer_class(lowerCamelCase_ ) _A = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } _A = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) _A = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(lowerCamelCase_ , 'keras_model.h5' ) model.save(lowerCamelCase_ ) _A = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) _A = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCAmelCase_ ( self : Dict ): np.random.seed(2 ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = int((config.image_size // config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) _A = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) _A = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": _A = outputs.last_hidden_state.numpy() _A = 0 else: _A = outputs.logits.numpy() _A = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) _A = model_class.from_pretrained(lowerCamelCase_ ) _A = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": _A = after_outputs['last_hidden_state'].numpy() _A = 0 else: _A = after_outputs['logits'].numpy() _A = 0 _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCAmelCase_ ( self : List[str] ): np.random.seed(2 ) _A , _A = self.model_tester.prepare_config_and_inputs_for_common() _A = int((config.image_size // config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: _A = model_class(lowerCamelCase_ ) _A = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) _A = model(lowerCamelCase_ , noise=lowerCamelCase_ ) _A = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) _A = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config _A = model_class.from_config(model.config ) _A = new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) _A = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowerCAmelCase_ ( self : int ): pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load' ) def lowerCAmelCase_ ( self : Optional[int] ): pass @slow def lowerCAmelCase_ ( self : Optional[int] ): _A = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(lowerCamelCase_ ) def _snake_case ( ) -> int: '''simple docstring''' _A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Dict ): return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self : List[str] ): np.random.seed(2 ) _A = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=lowerCamelCase_ , return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _A = ViTMAEConfig() _A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _A = np.random.uniform(size=(1, num_patches) ) # forward pass _A = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits _A = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) _A = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
315
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted runs input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Iterative (bottom-up) merge sort; returns a sorted copy of the input."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
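A quick usage sketch for the iterative merge sort above.

# illustrative calls; expected outputs shown in the comments
print(iter_merge_sort([5, 9, 8, 7, 1, 2, 7]))  # [1, 2, 5, 7, 7, 8, 9]
print(iter_merge_sort([-2, -9, 0, -11]))       # [-11, -9, -2, 0]
print(iter_merge_sort([1]))                    # [1]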
343
0
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def a_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' with open(UpperCamelCase_ ) as metadata_file: _lowerCamelCase : List[Any] =json.load(UpperCamelCase_ ) _lowerCamelCase : Tuple =LukeConfig(use_entity_aware_attention=UpperCamelCase_ , **metadata['model_config'] ) # Load in the weights from the checkpoint_path _lowerCamelCase : Union[str, Any] =torch.load(UpperCamelCase_ , map_location='cpu' ) # Load the entity vocab file _lowerCamelCase : Any =load_entity_vocab(UpperCamelCase_ ) _lowerCamelCase : Dict =RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks _lowerCamelCase : Dict =AddedToken('<ent>' , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) _lowerCamelCase : List[Any] =AddedToken('<ent2>' , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'''Saving tokenizer to {pytorch_dump_folder_path}''' ) tokenizer.save_pretrained(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(UpperCamelCase_ , UpperCamelCase_ ) _lowerCamelCase : Tuple =LukeTokenizer.from_pretrained(UpperCamelCase_ ) # Initialize the embeddings of the special tokens _lowerCamelCase : List[Any] =state_dict['embeddings.word_embeddings.weight'] _lowerCamelCase : str =word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) _lowerCamelCase : Optional[Any] =word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) _lowerCamelCase : Union[str, Any] =torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _lowerCamelCase : Tuple =F'''encoder.layer.{layer_index}.attention.self.''' _lowerCamelCase : Tuple =state_dict[prefix + matrix_name] _lowerCamelCase : List[Any] =state_dict[prefix + matrix_name] _lowerCamelCase : int =state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _lowerCamelCase : Optional[int] =state_dict['entity_embeddings.entity_embeddings.weight'] _lowerCamelCase : Optional[Any] =entity_emb[entity_vocab['[MASK]']] _lowerCamelCase : str =LukeModel(config=UpperCamelCase_ ).eval() _lowerCamelCase , _lowerCamelCase : str =model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ ) if not (len(UpperCamelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'''Missing keys {', '.join(UpperCamelCase_ )}. 
Expected only missing embeddings.position_ids''' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' F''' {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}''' ) # Check outputs _lowerCamelCase : Union[str, Any] =LukeTokenizer.from_pretrained(UpperCamelCase_ , task='entity_classification' ) _lowerCamelCase : int =( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' ) _lowerCamelCase : List[str] =(39, 42) _lowerCamelCase : Tuple =tokenizer(UpperCamelCase_ , entity_spans=[span] , add_prefix_space=UpperCamelCase_ , return_tensors='pt' ) _lowerCamelCase : int =model(**UpperCamelCase_ ) # Verify word hidden states if model_size == "large": _lowerCamelCase : List[Any] =torch.Size((1, 42, 1_024) ) _lowerCamelCase : List[str] =torch.tensor( [[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] ) else: # base _lowerCamelCase : List[str] =torch.Size((1, 42, 768) ) _lowerCamelCase : List[str] =torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _lowerCamelCase : Any =torch.Size((1, 1, 1_024) ) _lowerCamelCase : Tuple =torch.tensor([[0.04_66, -0.01_06, -0.01_79]] ) else: # base _lowerCamelCase : Union[str, Any] =torch.Size((1, 1, 768) ) _lowerCamelCase : int =torch.tensor([[0.14_57, 0.10_44, 0.01_74]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is''' F''' {expected_shape}''' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(UpperCamelCase_ ) ) model.save_pretrained(UpperCamelCase_ ) def a_ ( SCREAMING_SNAKE_CASE__ : Any ): '''simple docstring''' _lowerCamelCase : Union[str, Any] ={} with open(UpperCamelCase_ , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(UpperCamelCase_ ): _lowerCamelCase , _lowerCamelCase : Dict =line.rstrip().split('\t' ) _lowerCamelCase : List[str] =index return entity_vocab if __name__ == "__main__": lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' 
) lowerCamelCase = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
199
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=10 , lowerCamelCase_ : Optional[int]=[8, 16, 32, 64] , lowerCamelCase_ : List[str]=[1, 1, 2, 1] , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[Any]="relu" , lowerCamelCase_ : List[Any]=3 , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase_ : Optional[Any]=[2, 3, 4] , lowerCamelCase_ : List[Any]=1 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) UpperCamelCase = out_features UpperCamelCase = out_indices UpperCamelCase = num_groups def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] ): """simple docstring""" UpperCamelCase = BitModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = BitForImageClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int ): 
"""simple docstring""" UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None UpperCamelCase = None UpperCamelCase = BitBackbone(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() UpperCamelCase = model(lowerCamelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __lowerCAmelCase = ( {"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""Bit does not output attentions""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""Bit does not use inputs_embeds""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass @unittest.skip(reason="""Bit does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(config=lowerCamelCase_ ) for name, module in model.named_modules(): if isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) def lowerCamelCase_ ( self : int ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): UpperCamelCase = model_class(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() with torch.no_grad(): UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""preactivation""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip(reason="""Bit does not use feedforward chunking""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" pass def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = BitModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase_ ) UpperCamelCase = self.default_image_processor UpperCamelCase = 
prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ ) # forward pass with torch.no_grad(): UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1E-4 ) ) @require_torch class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (BitBackbone,) if is_torch_available() else () __lowerCAmelCase = BitConfig __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = BitModelTester(self )
343
0
"""simple docstring""" def lowercase ( _snake_case : Optional[int] , _snake_case : List[str] ) ->int: """simple docstring""" if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError('''String lengths must match!''' ) __snake_case : Union[str, Any] = 0 for chara, chara in zip(UpperCamelCase_ , UpperCamelCase_ ): if chara != chara: count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
102
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Optional[int] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=3 , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : List[str]=[10, 20, 30, 40] , lowerCamelCase_ : Tuple=[1, 1, 2, 1] , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=True , lowerCamelCase_ : Tuple="relu" , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = num_channels UpperCamelCase = embeddings_size UpperCamelCase = hidden_sizes UpperCamelCase = depths UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_act UpperCamelCase = num_labels UpperCamelCase = scope UpperCamelCase = len(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple ): """simple docstring""" UpperCamelCase = TFResNetModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] ): """simple docstring""" UpperCamelCase = self.num_labels UpperCamelCase = TFResNetForImageClassification(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : int ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFResNetModel, 
TFResNetForImageClassification) if is_tf_available() else () __lowerCAmelCase = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = TFResNetModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return @unittest.skip(reason="""ResNet does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" pass @unittest.skip(reason="""ResNet does not support input and output embeddings""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" pass def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" def check_hidden_states_output(lowerCamelCase_ : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : str ): UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = ["""basic""", """bottleneck"""] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCamelCase = layer_type UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Any ): """simple docstring""" for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase = TFResNetModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass UpperCamelCase = model(**lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , lowerCamelCase_ , atol=1E-4 ) )
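# Hedged usage sketch for the model classes exercised by the tests above.
# The checkpoint name "microsoft/resnet-50" is an assumption (any TF ResNet
# checkpoint would work); everything else uses the public transformers API
# that the integration test itself exercises.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification


def classify_image(path: str, checkpoint: str = "microsoft/resnet-50") -> str:
    image_processor = AutoImageProcessor.from_pretrained(checkpoint)
    model = TFResNetForImageClassification.from_pretrained(checkpoint)
    inputs = image_processor(images=Image.open(path), return_tensors="tf")
    logits = model(**inputs).logits  # shape: (1, num_labels)
    predicted_id = int(tf.math.argmax(logits, axis=-1)[0])
    return model.config.id2label[predicted_id]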
343
0
from __future__ import annotations

import json

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

headers = {"UserAgent": UserAgent().random}


def extract_user_profile(script) -> dict:
    """Extract the user profile dictionary from an Instagram page <script> tag."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]


class InstagramUser:
    def __init__(self, username: str) -> None:
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Fetch the profile page and return the parsed user data."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]


def test_instagram_user(username: str = "github") -> None:
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
    assert instagram_user.username == username
    if username != "github":
        return
    assert instagram_user.fullname == "GitHub"
    assert instagram_user.biography == "Built for developers."
    assert instagram_user.number_of_posts > 150
    assert instagram_user.number_of_followers > 120000
    assert instagram_user.number_of_followings > 15
    assert instagram_user.email == "[email protected]"
    assert instagram_user.website == "https://github.com/readme"
    assert instagram_user.profile_picture_url.startswith("https://instagram.")
    assert instagram_user.is_verified is True
    assert instagram_user.is_private is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    instagram_user = InstagramUser("github")
    print(instagram_user)
    print(f"{instagram_user.number_of_posts = }")
    print(f"{instagram_user.number_of_followers = }")
    print(f"{instagram_user.number_of_followings = }")
    print(f"{instagram_user.email = }")
    print(f"{instagram_user.website = }")
    print(f"{instagram_user.profile_picture_url = }")
    print(f"{instagram_user.is_verified = }")
    print(f"{instagram_user.is_private = }")
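# Hedged, self-contained sketch of the parsing step performed by
# extract_user_profile above: locate the shared-data <script> tag and decode
# the embedded JSON. The sample HTML string is illustrative only and mimics
# the page structure the scraper assumes.
import json

from bs4 import BeautifulSoup

sample_html = (
    "<html><script>window._sharedData = "
    '{"config": {}, "entry_data": {"ProfilePage": [{"graphql": {"user": '
    '{"username": "github"}}}]}};</script></html>'
)
script = BeautifulSoup(sample_html, "html.parser").find("script")
data = script.contents[0]
info = json.loads(data[data.find('{"config"') : -1])  # strip prefix and trailing ";"
print(info["entry_data"]["ProfilePage"][0]["graphql"]["user"]["username"])  # github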
21
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) _SCREAMING_SNAKE_CASE = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', F'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', F'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) 
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), 
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: '''simple docstring''' UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val def lowercase( UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" ) UpperCamelCase = value else: UpperCamelCase = value return new_state_dict def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]: '''simple docstring''' UpperCamelCase = """""" if is_panoptic: UpperCamelCase = """conditional_detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict UpperCamelCase = in_proj_weight[:256, :] UpperCamelCase = in_proj_bias[:256] UpperCamelCase = in_proj_weight[256:512, :] UpperCamelCase = in_proj_bias[256:512] UpperCamelCase = in_proj_weight[-256:, :] UpperCamelCase = in_proj_bias[-256:] def lowercase( ) -> Any: '''simple docstring''' UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ) return im @torch.no_grad() def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any: '''simple docstring''' UpperCamelCase = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: UpperCamelCase = """resnet101""" if 
"dc5" in model_name: UpperCamelCase = True UpperCamelCase = """panoptic""" in model_name if is_panoptic: UpperCamelCase = 250 else: UpperCamelCase = 91 UpperCamelCase = """huggingface/label-files""" UpperCamelCase = """coco-detection-id2label.json""" UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} UpperCamelCase = idalabel UpperCamelCase = {v: k for k, v in idalabel.items()} # load image processor UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection""" UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ ) # prepare image UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase = encoding["""pixel_values"""] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval() UpperCamelCase = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: UpperCamelCase = """conditional_detr.""" + src rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) UpperCamelCase = rename_backbone_keys(UpperCamelCase_ ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""conditional_detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): UpperCamelCase = state_dict.pop(UpperCamelCase_ ) UpperCamelCase = val # finally, create HuggingFace model and load state dict UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) model.eval() model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" ) # verify our conversion UpperCamelCase = conditional_detr(UpperCamelCase_ ) UpperCamelCase = model(UpperCamelCase_ ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) model.save_pretrained(UpperCamelCase_ ) image_processor.save_pretrained(UpperCamelCase_ ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() 
parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
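# Hedged illustration of the slicing performed by read_in_q_k_v above:
# PyTorch's fused attention stores the query/key/value projections as a single
# (3*d, d) matrix, which the converter splits into three (d, d) blocks.
# d = 256 matches the conditional DETR hidden size; the tensors are random
# stand-ins for a real checkpoint.
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
in_proj_bias = torch.randn(3 * d)

q_weight, k_weight, v_weight = (
    in_proj_weight[:d, :],
    in_proj_weight[d : 2 * d, :],
    in_proj_weight[-d:, :],
)
q_bias, k_bias, v_bias = in_proj_bias[:d], in_proj_bias[d : 2 * d], in_proj_bias[-d:]

assert q_weight.shape == k_weight.shape == v_weight.shape == (d, d)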
343
0
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _a : List[Any]= models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="relu")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _a : Any= tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _a : List[Any]= tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) _a : Tuple= train_datagen.flow_from_directory( "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) _a : Optional[int]= test_datagen.flow_from_directory( "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions _a : Any= tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(64, 64) ) _a : Union[str, Any]= tf.keras.preprocessing.image.img_to_array(test_image) _a : List[str]= np.expand_dims(test_image, axis=0) _a : Dict= classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _a : Union[str, Any]= "Normal" if result[0][0] == 1: _a : Union[str, Any]= "Abnormality detected"
172
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : Tuple , lowerCamelCase_ : Any , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : str=30 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Union[str, Any]=3 , lowerCamelCase_ : Any=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Optional[Any]=2 , lowerCamelCase_ : int=4 , lowerCamelCase_ : str=37 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : List[Any]=10 , lowerCamelCase_ : List[Any]=0.0_2 , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : List[Any]=0.6 , lowerCamelCase_ : Optional[Any]=None , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = image_size UpperCamelCase = patch_size UpperCamelCase = num_channels UpperCamelCase = is_training UpperCamelCase = use_labels UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = mask_ratio UpperCamelCase = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) UpperCamelCase = (image_size // patch_size) ** 2 UpperCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Any , 
lowerCamelCase_ : Union[str, Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModel(config=lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) # expected sequence length = num_patches UpperCamelCase = (self.image_size // self.patch_size) ** 2 UpperCamelCase = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images UpperCamelCase = 1 UpperCamelCase = TFViTMAEForPreTraining(lowerCamelCase_ ) UpperCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCamelCase = model(lowerCamelCase_ , training=lowerCamelCase_ ) UpperCamelCase = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) = config_and_inputs UpperCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __lowerCAmelCase = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {} __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False __lowerCAmelCase = False def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" UpperCamelCase = TFViTMAEModelTester(self ) UpperCamelCase = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def lowerCamelCase_ ( self : str ): """simple docstring""" pass def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase = [*signature.parameters.keys()] UpperCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" 
UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = outputs_dict[0].numpy() UpperCamelCase = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def lowerCamelCase_ ( self : List[Any] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(lowerCamelCase_ : List[Any] ): UpperCamelCase = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCamelCase_ ): UpperCamelCase = v.numpy() else: UpperCamelCase = np.array(lowerCamelCase_ ) return inputs_np_dict for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = prepare_numpy_arrays(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.constant(lowerCamelCase_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument UpperCamelCase = tf_noise super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(lowerCamelCase_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(lowerCamelCase_ , lowerCamelCase_ ),) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCamelCase_ , """_keras_serializable""" , lowerCamelCase_ ) } UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) UpperCamelCase = tf.convert_to_tensor(lowerCamelCase_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: UpperCamelCase = main_layer_class(lowerCamelCase_ ) UpperCamelCase = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } UpperCamelCase = tf.keras.Model(lowerCamelCase_ , outputs=main_layer(lowerCamelCase_ ) ) UpperCamelCase = model(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = os.path.join(lowerCamelCase_ , """keras_model.h5""" ) model.save(lowerCamelCase_ ) UpperCamelCase = tf.keras.models.load_model( lowerCamelCase_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(lowerCamelCase_ , tf.keras.Model ) UpperCamelCase = model(lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Dict ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = outputs.last_hidden_state.numpy() UpperCamelCase = 0 else: UpperCamelCase = outputs.logits.numpy() UpperCamelCase = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCamelCase_ , saved_model=lowerCamelCase_ ) UpperCamelCase = model_class.from_pretrained(lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) if model_class.__name__ == "TFViTMAEModel": UpperCamelCase = after_outputs["""last_hidden_state"""].numpy() UpperCamelCase = 0 else: UpperCamelCase = after_outputs["""logits"""].numpy() UpperCamelCase = 0 UpperCamelCase = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCamelCase_ , 1E-5 ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase , UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() UpperCamelCase = int((config.image_size // config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: UpperCamelCase = model_class(lowerCamelCase_ ) UpperCamelCase = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) UpperCamelCase = model(lowerCamelCase_ , noise=lowerCamelCase_ ) UpperCamelCase = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCamelCase_ ) UpperCamelCase = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config UpperCamelCase = model_class.from_config(model.config ) UpperCamelCase = 
new_model(lowerCamelCase_ ) # Build model new_model.set_weights(model.get_weights() ) UpperCamelCase = new_model(lowerCamelCase_ , noise=lowerCamelCase_ ) self.assert_outputs_same(lowerCamelCase_ , lowerCamelCase_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def lowerCamelCase_ ( self : int ): """simple docstring""" pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" pass @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def lowercase( ) -> int: '''simple docstring''' UpperCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def lowerCamelCase_ ( self : Dict ): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" np.random.seed(2 ) UpperCamelCase = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) UpperCamelCase = self.default_image_processor UpperCamelCase = prepare_img() UpperCamelCase = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) UpperCamelCase = ViTMAEConfig() UpperCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) UpperCamelCase = np.random.uniform(size=(1, num_patches) ) # forward pass UpperCamelCase = model(**lowerCamelCase_ , noise=lowerCamelCase_ ) # verify the logits UpperCamelCase = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) UpperCamelCase = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
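# Hedged sketch of the noise-driven random masking that the ViTMAE tests above
# feed through the `noise` argument: patches are shuffled by argsort of
# per-patch noise, the first (1 - mask_ratio) fraction is kept, and
# ids_restore undoes the shuffle at decoding time. Pure NumPy, illustrative
# only; the real models implement this internally on tensors.
import numpy as np


def random_masking(num_patches: int, mask_ratio: float, noise: np.ndarray):
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise)        # ascending noise = "keep first"
    ids_restore = np.argsort(ids_shuffle)  # inverse permutation
    ids_keep = ids_shuffle[:len_keep]
    mask = np.ones(num_patches)
    mask[:len_keep] = 0
    mask = mask[ids_restore]               # 0 = kept patch, 1 = masked patch
    return ids_keep, mask, ids_restore


ids_keep, mask, ids_restore = random_masking(196, 0.75, np.random.uniform(size=196))
assert mask.sum() == 196 - int(196 * 0.25)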
343
0