Dataset schema (five fields per record, repeated in order below):

  code                     string   (length 81 to 54k)
  code_codestyle           int64    (0 to 721)
  style_context            string   (length 91 to 41.9k)
  style_context_codestyle  int64    (0 to 699)
  label                    int64    (0 to 1)
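A minimal sketch of iterating records with this schema using the `datasets` library. The file path `records.jsonl` is hypothetical; this dump does not name the dataset or its storage format:

from datasets import load_dataset

# Hypothetical local JSON-lines dump of the records shown below.
ds = load_dataset("json", data_files="records.jsonl", split="train")

for row in ds:
    # Each row carries two code samples plus their style ids and a binary label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # first 80 characters of the flattened source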
# NOTE: assignment-target names below were obfuscated in this dump and have been
# reconstructed from call sites and the surrounding transformers conversion utilities.
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a Flax parameter key/tensor pair so it matches the PyTorch naming scheme."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,  # fixed: the dump referenced a non-existent args.switch_tax_checkpoint_path
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")  # fixed: the dump had the non-existent TaTokenizer
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
code_codestyle: 676
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Flock-protected print so output from concurrent processes does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
style_context_codestyle: 676
label: 1
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 676
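The Project Euler #8 solver above can be sanity-checked against the published answer for a window of thirteen digits; a quick sketch assuming the reconstructed name `solution`:

# The published Project Euler #8 answer for 13 adjacent digits.
assert solution() == 23514624000
print(solution())  # 23514624000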
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
style_context_codestyle: 676
label: 1
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import platform
import sys


# The dump only kept the value "3"; the assignment target is assumed to be the
# usual TensorFlow log-level knob, which the otherwise-unused `os` import suggests.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
    print("Torch version:", None)

try:
    import transformers

    print("transformers version:", transformers.__version__)
except ImportError:
    print("transformers version:", None)
code_codestyle: 676
# NOTE: class, method and attribute names were obfuscated in this dump and have been
# reconstructed from call sites; boolean literals marked below are assumptions.
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # boolean literals were obfuscated in the dump; assumed values
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
style_context_codestyle: 676
label: 1
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    # class and attribute names reconstructed; the dump obfuscated them
    slow_tokenizer_class = CustomTokenizer
    pass
code_codestyle: 676
def automorphic_number(number: int) -> bool:
    """Return True if number is automorphic, i.e. number**2 ends in the digits of number.
    (Function name reconstructed; the dump obfuscated it.)"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 676
label: 1
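A few spot checks of the automorphic-number predicate above, assuming the reconstructed name `automorphic_number`:

# n is automorphic when n**2 ends in the digits of n.
for n in (5, 6, 25, 76, 376, 7):
    print(n, automorphic_number(n))
# 5, 6, 25, 76 and 376 are automorphic (25, 36, 625, 5776, 141376); 7 is not (49).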
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# _import_structure keys reconstructed from the TYPE_CHECKING imports below
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
code_codestyle: 676
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU / swish activation: x * sigmoid(x). (Name reconstructed; the dump obfuscated it.)"""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 676
label: 1
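A quick numeric check of the two activations above, assuming the reconstructed names `sigmoid` and `sigmoid_linear_unit`:

import numpy as np

# sigmoid squashes to (0, 1); SiLU/swish is x * sigmoid(x).
x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # ~[0.2689, 0.5, 0.7311]
print(sigmoid_linear_unit(x))  # ~[-0.2689, 0.0, 0.7311]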
# NOTE: class and test-method names reconstructed; the dump obfuscated them.
from unittest import TestCase

from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset


class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
code_codestyle: 676
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
style_context_codestyle: 676
label: 1
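A quick sanity check for the Lucas-Lehmer implementation above, assuming the reconstructed name `lucas_lehmer_test`:

# M_7 = 127 is prime, M_11 = 2047 = 23 * 89 is not.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False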
# NOTE: class and attribute names reconstructed from call sites; the dump obfuscated them.
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
code_codestyle: 676
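A shape round-trip sketch for the temporal transformer above. All sizes are illustrative, and `norm_num_groups` must divide `in_channels` for the GroupNorm to construct:

import torch

# Illustrative sizes only: batch=2, frames=4, channels=16, 8x8 feature maps.
model = TransformerTemporalModel(
    num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=8
)
x = torch.randn(2 * 4, 16, 8, 8)  # (batch * num_frames, C, H, W)
out = model(x, num_frames=4)
print(out.sample.shape)  # torch.Size([8, 16, 8, 8]) -- input shape is preserved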
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# _import_structure keys reconstructed from the TYPE_CHECKING imports below
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 676
label: 1
# NOTE: class and method names reconstructed from call sites; the dump obfuscated them.
import uuid
from typing import Any, Dict, List, Optional, Union

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


class Conversation:
    def __init__(self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()  # fixed: the dump had the non-existent uuid.uuida()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation) -> Dict:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
code_codestyle: 676
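A minimal usage sketch for the pipeline above, using the public conversational API of that transformers era; the model checkpoint is an illustrative choice, not one named in this record:

from transformers import Conversation, pipeline

# "microsoft/DialoGPT-small" is just an example checkpoint.
chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])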
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    # fixed: the dump had "imgaimg"/"ImgaImg" for the img2img pipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
style_context_codestyle: 676
label: 1
import os


def solution():
    """Return the maximum total from top to bottom of the triangle in triangle.txt (Project Euler 18)."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
code_codestyle: 676
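The row-by-row recurrence in the solver above can be checked on the four-row example triangle from the Project Euler statement (best total 23); a standalone sketch with the hypothetical name `max_path_sum`:

def max_path_sum(triangle):
    # Same recurrence as the record above, applied to an in-memory triangle.
    a = [row[:] for row in triangle]
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            up = a[i - 1][j] if j != len(a[i - 1]) else 0
            left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(up, left)
    return max(a[-1])


print(max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]))  # 23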
# NOTE: "2" was obfuscated to "a" in this dump (SwinaSR -> Swin2SR); identifier names
# and the config/state-dict assignment targets below were reconstructed from call sites.
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config


def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            # Target key strings below follow the HF Swin2SR naming scheme; they were
            # reconstructed, as the dump dropped the assignment targets.
            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict


def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
style_context_codestyle: 676
label: 1
def sum_of_digits(n: int) -> int:
    """Iterative digit sum of abs(n)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum of abs(n)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
code_codestyle: 676
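A quick agreement check across the three digit-sum implementations above, using the reconstructed names:

# All three variants should agree, including on zero and negative inputs.
for n in (0, 12345, -9_999_999):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)
print(sum_of_digits(12345))  # 15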
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 676
label: 1
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger UpperCAmelCase__ : Union[str, Any] = "<<<<<<< This should probably be modified because it mentions: " UpperCAmelCase__ : Tuple = "=======\n>>>>>>>\n" UpperCAmelCase__ : Tuple = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] UpperCAmelCase__ : List[str] = [ # (pattern, replacement) # Order is important here for some replacements (r"tfds\.core", r"datasets"), (r"tf\.io\.gfile\.GFile", r"open"), (r"tf\.([\w\d]+)", r"datasets.Value('\1')"), (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"), (r"tfds\.features\.Text\(", r"datasets.Value('string'),"), (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("), (r"tfds\.features\.FeaturesDict\(", r"dict("), (r"The TensorFlow Datasets Authors", r"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (r"tfds\.", r"datasets."), (r"dl_manager\.manual_dir", r"self.config.data_dir"), (r"self\.builder_config", r"self.config"), ] def A ( snake_case__ : Namespace ) -> Union[str, Any]: '''simple docstring''' return ConvertCommand(args.tfds_path , args.datasets_directory ) class __lowercase ( lowerCamelCase__ ): @staticmethod def _a ( lowercase_) -> int: __snake_case = parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowercase_ , required=lowercase_ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowercase_ , required=lowercase_ , help='Path to the HuggingFace Datasets folder.') train_parser.set_defaults(func=lowercase_) def __init__( self , lowercase_ , lowercase_ , *lowercase_) -> Any: __snake_case = get_logger('datasets-cli/converting') __snake_case = tfds_path __snake_case = datasets_directory def _a ( self) -> Dict: if os.path.isdir(self._tfds_path): __snake_case = os.path.abspath(self._tfds_path) elif os.path.isfile(self._tfds_path): __snake_case = os.path.dirname(self._tfds_path) else: raise ValueError('--tfds_path is neither a directory nor a file. 
Please check path.') __snake_case = os.path.abspath(self._datasets_directory) self._logger.info(F"Converting datasets from {abs_tfds_path} to {abs_datasets_path}") __snake_case = [] __snake_case = [] __snake_case = {} if os.path.isdir(self._tfds_path): __snake_case = os.listdir(lowercase_) else: __snake_case = [os.path.basename(self._tfds_path)] for f_name in file_names: self._logger.info(F"Looking at file {f_name}") __snake_case = os.path.join(lowercase_ , lowercase_) __snake_case = os.path.join(lowercase_ , lowercase_) if not os.path.isfile(lowercase_) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file') continue with open(lowercase_ , encoding='utf-8') as f: __snake_case = f.readlines() __snake_case = [] __snake_case = False __snake_case = False __snake_case = [] for line in lines: __snake_case = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: __snake_case = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here __snake_case = '' continue elif "from absl import logging" in out_line: __snake_case = 'from datasets import logging\n' elif "getLogger" in out_line: __snake_case = out_line.replace('getLogger' , 'get_logger') elif any(expression in out_line for expression in TO_HIGHLIGHT): __snake_case = True __snake_case = list(filter(lambda lowercase_: e in out_line , lowercase_)) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowercase_) + '\n') out_lines.append(lowercase_) out_lines.append(lowercase_) continue else: for pattern, replacement in TO_CONVERT: __snake_case = re.sub(lowercase_ , lowercase_ , lowercase_) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: __snake_case = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowercase_) tfds_imports.extend(imp.strip() for imp in match.group(1).split(',')) __snake_case = 'from . import ' + match.group(1) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(F"Error converting {out_line.strip()}") if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: __snake_case = True out_lines.append(lowercase_) if is_builder or "wmt" in f_name: # We create a new directory for each dataset __snake_case = f_name.replace('.py' , '') __snake_case = os.path.join(lowercase_ , lowercase_) __snake_case = os.path.join(lowercase_ , lowercase_) os.makedirs(lowercase_ , exist_ok=lowercase_) self._logger.info(F"Adding directory {output_dir}") imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) else: # Utilities will be moved at the end utils_files.append(lowercase_) if needs_manual_update: with_manual_update.append(lowercase_) with open(lowercase_ , 'w' , encoding='utf-8') as f: f.writelines(lowercase_) self._logger.info(F"Converted in {output_file}") for utils_file in utils_files: try: __snake_case = os.path.basename(lowercase_) __snake_case = imports_to_builder_map[f_name.replace('.py' , '')] self._logger.info(F"Moving {dest_folder} to {utils_file}") shutil.copy(lowercase_ , lowercase_) except KeyError: self._logger.error(F"Cannot find destination folder for {utils_file}. 
Please copy manually.") if with_manual_update: for file_path in with_manual_update: self._logger.warning( F"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.")
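# The conversion above is driven by the ordered regex rules in TO_CONVERT.
# A hedged demo of two of those rules on an invented sample line (the patterns
# are copied from the list at the top of this file):
import re

line = "features = tfds.features.FeaturesDict({'text': tf.string})"
line = re.sub(r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features(", line)
line = re.sub(r"tf\.([\w\d]+)", r"datasets.Value('\1')", line)
print(line)  # features=datasets.Features({'text': datasets.Value('string')})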
676
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Depth (number of levels) of the tree; an empty tree has depth 0."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """True if every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Note: the attachment points below are reconstructed; the original dump
    # only showed nine nodes being created without their parent assignments.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
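# Quick check against the helpers above: a two-level tree in which every node
# has zero or two children is full, and its depth is 2.
root = Node(1)
root.left = Node(2)
root.right = Node(3)
assert is_full_binary_tree(root)
assert depth_of_tree(root) == 2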
676
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = CycleDiffusionPipeline __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { '''negative_prompt''', '''height''', '''width''', '''negative_prompt_embeds''', } __UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} ) __UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def _a ( self) -> Tuple: torch.manual_seed(0) __snake_case = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) __snake_case = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , ) torch.manual_seed(0) __snake_case = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0) __snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) __snake_case = CLIPTextModel(lowercase_) __snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') __snake_case = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def _a ( self , lowercase_ , lowercase_=0) -> List[Any]: __snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_)).to(lowercase_) __snake_case = image / 2 + 0.5 if str(lowercase_).startswith('mps'): __snake_case = torch.manual_seed(lowercase_) else: __snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_) __snake_case = { 'prompt': 'An astronaut riding an elephant', 'source_prompt': 'An astronaut riding a horse', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'eta': 0.1, 'strength': 0.8, 'guidance_scale': 3, 'source_guidance_scale': 1, 'output_type': 'numpy', } return inputs def _a ( self) -> Tuple: __snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case = self.get_dummy_components() __snake_case = CycleDiffusionPipeline(**lowercase_) __snake_case = pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) __snake_case = self.get_dummy_inputs(lowercase_) __snake_case = pipe(**lowercase_) __snake_case = 
output.images __snake_case = images[0, -3:, -3:, -1] assert images.shape == (1, 3_2, 3_2, 3) __snake_case = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def _a ( self) -> Dict: __snake_case = self.get_dummy_components() for name, module in components.items(): if hasattr(lowercase_ , 'half'): __snake_case = module.half() __snake_case = CycleDiffusionPipeline(**lowercase_) __snake_case = pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) __snake_case = self.get_dummy_inputs(lowercase_) __snake_case = pipe(**lowercase_) __snake_case = output.images __snake_case = images[0, -3:, -3:, -1] assert images.shape == (1, 3_2, 3_2, 3) __snake_case = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def _a ( self) -> Any: return super().test_save_load_local() @unittest.skip('non-deterministic pipeline') def _a ( self) -> Optional[int]: return super().test_inference_batch_single_identical() @skip_mps def _a ( self) -> int: return super().test_dict_tuple_outputs_equivalent() @skip_mps def _a ( self) -> Optional[int]: return super().test_save_load_optional_components() @skip_mps def _a ( self) -> Any: return super().test_attention_slicing_forward_pass() @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def _a ( self) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self) -> str: __snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/cycle-diffusion/black_colored_car.png') __snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy') __snake_case = init_image.resize((5_1_2, 5_1_2)) __snake_case = 'CompVis/stable-diffusion-v1-4' __snake_case = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler') __snake_case = CycleDiffusionPipeline.from_pretrained( lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , torch_dtype=torch.floataa , revision='fp16') pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() __snake_case = 'A black colored car' __snake_case = 'A blue colored car' __snake_case = torch.manual_seed(0) __snake_case = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='np' , ) __snake_case = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image).max() < 5e-1 def _a ( self) -> Union[str, Any]: __snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/cycle-diffusion/black_colored_car.png') __snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy') __snake_case = init_image.resize((5_1_2, 5_1_2)) __snake_case = 'CompVis/stable-diffusion-v1-4' __snake_case = DDIMScheduler.from_pretrained(lowercase_ , subfolder='scheduler') __snake_case = CycleDiffusionPipeline.from_pretrained(lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_) 
pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() __snake_case = 'A black colored car' __snake_case = 'A blue colored car' __snake_case = torch.manual_seed(0) __snake_case = pipe( prompt=lowercase_ , source_prompt=lowercase_ , image=lowercase_ , num_inference_steps=1_0_0 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase_ , output_type='np' , ) __snake_case = output.images assert np.abs(image - expected_image).max() < 2e-2
676
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
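# Sanity check: the defaults wired in above should appear on a freshly built
# config (a sketch assuming a transformers install that ships this class).
from transformers import TableTransformerConfig

config = TableTransformerConfig()
assert config.d_model == config.hidden_size == 256
assert config.num_attention_heads == config.encoder_attention_heads == 8
assert config.num_queries == 100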
676
1
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = StableUnCLIPPipeline __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __UpperCAmelCase = False def _a ( self) -> Tuple: __snake_case = 3_2 __snake_case = embedder_hidden_size # prior components torch.manual_seed(0) __snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') torch.manual_seed(0) __snake_case = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )) torch.manual_seed(0) __snake_case = PriorTransformer( num_attention_heads=2 , attention_head_dim=1_2 , embedding_dim=lowercase_ , num_layers=1 , ) torch.manual_seed(0) __snake_case = DDPMScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1_0_0_0 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , ) # regular denoising components torch.manual_seed(0) __snake_case = StableUnCLIPImageNormalizer(embedding_dim=lowercase_) __snake_case = DDPMScheduler(beta_schedule='squaredcos_cap_v2') torch.manual_seed(0) __snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') torch.manual_seed(0) __snake_case = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )) torch.manual_seed(0) __snake_case = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , ) torch.manual_seed(0) __snake_case = DDIMScheduler( beta_schedule='scaled_linear' , beta_start=0.0_0085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=lowercase_ , steps_offset=1 , ) torch.manual_seed(0) __snake_case = AutoencoderKL() __snake_case = { # prior components 'prior_tokenizer': prior_tokenizer, 'prior_text_encoder': prior_text_encoder, 'prior': prior, 
'prior_scheduler': prior_scheduler, # image noising components 'image_normalizer': image_normalizer, 'image_noising_scheduler': image_noising_scheduler, # regular denoising components 'tokenizer': tokenizer, 'text_encoder': text_encoder, 'unet': unet, 'scheduler': scheduler, 'vae': vae, } return components def _a ( self , lowercase_ , lowercase_=0) -> Dict: if str(lowercase_).startswith('mps'): __snake_case = torch.manual_seed(lowercase_) else: __snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_) __snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'prior_num_inference_steps': 2, 'output_type': 'numpy', } return inputs def _a ( self) -> str: __snake_case = torch_device == 'cpu' self._test_attention_slicing_forward_pass(test_max_difference=lowercase_) def _a ( self) -> Tuple: __snake_case = torch_device in ['cpu', 'mps'] self._test_inference_batch_single_identical(test_max_difference=lowercase_) @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def _a ( self) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self) -> Tuple: __snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy') __snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa) pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __snake_case = torch.Generator(device='cpu').manual_seed(0) __snake_case = pipe('anime turle' , generator=lowercase_ , output_type='np') __snake_case = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(lowercase_ , lowercase_) def _a ( self) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __snake_case = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa) __snake_case = pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() __snake_case = pipe( 'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , ) __snake_case = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
676
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if (number, number + 2) is a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
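# Worked examples, assuming the repository's maths.prime_check is importable:
# 5 and 7 are both prime, so 5 has the twin 7; 4 is not prime, so -1.
assert twin_prime(5) == 7
assert twin_prime(4) == -1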
676
1
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys


fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
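# Illustration of the compiled filter, assuming sys.argv[1:] == ["utils", "src"]:
import re

regex = re.compile(r"^(utils|src).*?\.py$")
assert regex.match("src/transformers/trainer.py")
assert not regex.match("docs/index.md")           # wrong top-level dir
assert not regex.match("src/transformers/m.pyc")  # must end in .py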
676
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
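# Shape of the produced URL for one illustrative parameter combination;
# quote() percent-encodes the blank in the filename:
#   https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv
from urllib.parse import quote

assert quote("filename with blanks.csv") == "filename%20with%20blanks.csv"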
676
1
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase__ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_sentencepiece_available(): import sentencepiece as sp UpperCAmelCase__ : Tuple = 5 UpperCAmelCase__ : int = 10 @require_sentencepiece @require_tokenizers class __lowercase ( lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = SpeechaTextTokenizer __UpperCAmelCase = False __UpperCAmelCase = True def _a ( self) -> Optional[Any]: super().setUp() __snake_case = sp.SentencePieceProcessor() spm_model.Load(lowercase_) __snake_case = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(lowercase_))] __snake_case = dict(zip(lowercase_ , range(len(lowercase_)))) __snake_case = Path(self.tmpdirname) save_json(lowercase_ , save_dir / VOCAB_FILES_NAMES['vocab_file']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(lowercase_ , save_dir / VOCAB_FILES_NAMES['spm_file']) __snake_case = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def _a ( self) -> Tuple: __snake_case = '<pad>' __snake_case = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_) def _a ( self) -> List[Any]: __snake_case = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-1] , 'j') self.assertEqual(len(lowercase_) , 1_0_0_1) def _a ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_1) def _a ( self) -> Tuple: __snake_case = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) __snake_case = tokenizer.tokenize('This is a test') self.assertListEqual(lowercase_ , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase_) , [2_8_9, 5_0, 1_4, 1_7_4, 3_8_6] , ) __snake_case = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) __snake_case = tokenizer.convert_tokens_to_ids(lowercase_) self.assertListEqual(lowercase_ , [1_2, 2_5, 8_8, 5_9, 2_8, 2_3, 1_1, 4, 6_0_6, 3_5_1, 3_5_1, 3_5_1, 7, 1_6, 7_0, 5_0, 7_6, 8_4, 1_0, 4, 8]) __snake_case = tokenizer.convert_ids_to_tokens(lowercase_) self.assertListEqual( lowercase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def _a ( self) -> Tuple: # fmt: off __snake_case = {'input_ids': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 
9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase_ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class __lowercase ( unittest.TestCase ): __UpperCAmelCase = '''valhalla/s2t_mustc_multilinguial_medium''' __UpperCAmelCase = '''C\'est trop cool''' __UpperCAmelCase = '''Esto es genial''' @classmethod def _a ( cls) -> Dict: __snake_case = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def _a ( self) -> Any: self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 1_1) def _a ( self) -> Dict: self.assertEqual(self.tokenizer.vocab_size , 1_0_0_0_0) def _a ( self) -> Optional[Any]: self.assertIn(lowercase_ , self.tokenizer.all_special_ids) __snake_case = [ES_CODE, 4, 1_6_0_1, 4_7, 7_6_4_7, 2] __snake_case = self.tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_) __snake_case = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase_) self.assertEqual(lowercase_ , lowercase_) self.assertNotIn(self.tokenizer.eos_token , lowercase_) def _a ( self) -> Optional[int]: __snake_case = 'fr' __snake_case = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , lowercase_) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def _a ( self) -> Optional[Any]: __snake_case = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) __snake_case = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
676
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCAmelCase__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def A ( snake_case__ : List[Any] ) -> str: '''simple docstring''' for pegasus_name, hf_name in PATTERNS: __snake_case = k.replace(snake_case__ , snake_case__ ) return k def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration: '''simple docstring''' __snake_case = DEFAULTS.copy() cfg_kwargs.update(snake_case__ ) __snake_case = PegasusConfig(**snake_case__ ) __snake_case = PegasusForConditionalGeneration(snake_case__ ) __snake_case = torch_model.model.state_dict() __snake_case = {} for k, v in tf_weights.items(): __snake_case = rename_state_dict_key(snake_case__ ) if new_k not in sd: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if "dense" in k or "proj" in new_k: __snake_case = v.T __snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}" # make sure embedding.padding_idx is respected __snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) __snake_case = mapping['shared.weight'] __snake_case = mapping['shared.weight'] __snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**snake_case__ ) __snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ ) __snake_case = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict: '''simple docstring''' __snake_case = tf.train.list_variables(snake_case__ ) __snake_case = {} __snake_case = ['Adafactor', 'global_step'] for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ): __snake_case = any(pat in name for pat in ignore_name ) if skip_key: continue __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) __snake_case = array return tf_weights def A ( snake_case__ : str , snake_case__ : str ) -> Tuple: '''simple docstring''' # save tokenizer first __snake_case = Path(snake_case__ ).parent.name __snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings'] __snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(snake_case__ ) # convert model 
__snake_case = get_tf_weights_as_numpy(snake_case__ ) __snake_case = task_specific_params[f"summarization_{dataset}"] if dataset == "large": __snake_case = task_specific_params __snake_case = convert_pegasus(snake_case__ , snake_case__ ) torch_model.save_pretrained(snake_case__ ) __snake_case = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' ) if __name__ == "__main__": UpperCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ : int = parser.parse_args() if args.save_dir is None: UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name UpperCAmelCase__ : str = os.path.join("pegasus", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
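# A worked example of the ordered substring replacements in PATTERNS above;
# order matters ("memory_attention" must fire before the bare "attention").
# The sample key and this trimmed subset of the table are illustrative only.
patterns = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["kernel", "weight"],
]
key = "model/decoder/r.layer_0/memory_attention/output_proj/kernel"
for old, new in patterns:
    key = key.replace(old, new)
print(key)  # model.decoder.r.layers.0.encoder_attn.out_proj.weight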
676
1
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")

    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
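# Sanity check: for the straight line f(x) = x the piecewise-linear
# approximation is exact, so the result equals the true arc length sqrt(2)
# on [0, 1], up to floating-point accumulation.
import math

approx = line_length(lambda x: x, 0, 1, steps=100)
assert math.isclose(approx, math.sqrt(2))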
676
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]: super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , ) def _a ( self , lowercase_ = "auto") -> Union[str, Any]: if slice_size == "auto": __snake_case = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_) def _a ( self) -> Any: self.enable_attention_slicing(lowercase_) @torch.no_grad() def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]: __snake_case = self.speech_processor.feature_extractor( lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device) __snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0) __snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[ 0 ] if isinstance(lowercase_ , lowercase_): __snake_case = 1 elif isinstance(lowercase_ , lowercase_): __snake_case = len(lowercase_) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(lowercase_)}.") # get prompt text embeddings __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) __snake_case = text_inputs.input_ids if text_input_ids.shape[-1] > 
self.tokenizer.model_max_length: __snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F" {self.tokenizer.model_max_length} tokens: {removed_text}") __snake_case = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case = text_embeddings.shape __snake_case = text_embeddings.repeat(1 , lowercase_ , 1) __snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __snake_case = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case = 42 if negative_prompt is None: __snake_case = [''] * batch_size elif type(lowercase_) is not type(lowercase_): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !=" F" {type(lowercase_)}.") elif isinstance(lowercase_ , lowercase_): __snake_case = [negative_prompt] elif batch_size != len(lowercase_): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.') else: __snake_case = negative_prompt __snake_case = text_input_ids.shape[-1] __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , ) __snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case = uncond_embeddings.shape[1] __snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1) __snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to( self.device) else: __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") __snake_case = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(lowercase_) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler __snake_case = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) __snake_case = {} if accepts_eta: __snake_case = eta for i, t in enumerate(self.progress_bar(lowercase_)): # expand the latents if we are doing classifier free guidance __snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_) # predict the noise residual __snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case = noise_pred.chunk(2) __snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_) __snake_case = 1 / 0.1_8215 * latents __snake_case = self.vae.decode(lowercase_).sample __snake_case = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __snake_case = self.numpy_to_pil(lowercase_) if not return_dict: return image return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
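# The classifier-free guidance step above, in isolation (toy numbers only):
import torch

noise_pred_uncond = torch.tensor([0.0])
noise_pred_text = torch.tensor([1.0])
guidance_scale = 7.5
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([7.5000]): pushed past the text-conditioned prediction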
676
1
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class __lowercase : def __init__( self , lowercase_ , lowercase_=1_4 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=4 , lowercase_=4 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=0.02 , ) -> int: __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = rotary_dim __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = initializer_range __snake_case = None __snake_case = vocab_size - 1 __snake_case = vocab_size - 1 __snake_case = vocab_size - 1 def _a ( self) -> int: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length]) __snake_case = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def _a ( self) -> Optional[int]: __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]: __snake_case = 2_0 __snake_case = model_class_name(lowercase_) __snake_case = model.init_cache(input_ids.shape[0] , lowercase_) __snake_case = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4') __snake_case = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) __snake_case = model( input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , ) __snake_case = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4') __snake_case = model( input_ids[:, -1:] , attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , position_ids=lowercase_ , ) __snake_case = model(lowercase_) __snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is 
{diff}") def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any: __snake_case = 2_0 __snake_case = model_class_name(lowercase_) __snake_case = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) __snake_case = model.init_cache(input_ids.shape[0] , lowercase_) __snake_case = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) __snake_case = model( input_ids[:, :-1] , attention_mask=lowercase_ , past_key_values=lowercase_ , position_ids=lowercase_ , ) __snake_case = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4') __snake_case = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowercase_ , position_ids=lowercase_ , ) __snake_case = model(lowercase_ , attention_mask=lowercase_) __snake_case = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3 , msg=F"Max diff is {diff}") @require_flax class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () __UpperCAmelCase = (FlaxGPTJForCausalLM,) if is_flax_available() else () def _a ( self) -> Optional[Any]: __snake_case = FlaxGPTJModelTester(self) def _a ( self) -> Any: for model_class_name in self.all_model_classes: __snake_case , __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ , lowercase_) def _a ( self) -> List[str]: for model_class_name in self.all_model_classes: __snake_case , __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowercase_ , lowercase_ , lowercase_ , lowercase_) @tooslow def _a ( self) -> Optional[int]: __snake_case = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left') __snake_case = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowercase_ , truncation=lowercase_) __snake_case = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B') __snake_case = False __snake_case = model.config.eos_token_id __snake_case = jax.jit(model.generate) __snake_case = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id).sequences __snake_case = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_) __snake_case = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. 
I\'m going to', ] self.assertListEqual(lowercase_ , lowercase_) @is_pt_flax_cross_test def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs __snake_case = self._prepare_for_class(lowercase_ , lowercase_) __snake_case = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case = getattr(lowercase_ , lowercase_) __snake_case , __snake_case = pt_inputs['input_ids'].shape __snake_case = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(lowercase_): __snake_case = 0 __snake_case = 1 __snake_case = 0 __snake_case = 1 __snake_case = pt_model_class(lowercase_).eval() __snake_case = model_class(lowercase_ , dtype=jnp.floataa) __snake_case = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase_) __snake_case = fx_state with torch.no_grad(): __snake_case = pt_model(**lowercase_).to_tuple() __snake_case = fx_model(**lowercase_).to_tuple() self.assertEqual(len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(lowercase_ , lowercase_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowercase_) __snake_case = model_class.from_pretrained(lowercase_ , from_pt=lowercase_) __snake_case = fx_model_loaded(**lowercase_).to_tuple() self.assertEqual( len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch') for fx_output_loaded, pt_output in zip(lowercase_ , lowercase_): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2) @is_pt_flax_cross_test def _a ( self) -> Optional[int]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs __snake_case = self._prepare_for_class(lowercase_ , lowercase_) __snake_case = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __snake_case = model_class.__name__[4:] # Skip the "Flax" at the beginning __snake_case = getattr(lowercase_ , lowercase_) __snake_case = pt_model_class(lowercase_).eval() __snake_case = model_class(lowercase_ , dtype=jnp.floataa) __snake_case = load_flax_weights_in_pytorch_model(lowercase_ , fx_model.params) __snake_case , __snake_case = pt_inputs['input_ids'].shape __snake_case = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(lowercase_): __snake_case = 0 __snake_case = 1 __snake_case = 0 __snake_case = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __snake_case = pt_model(**lowercase_).to_tuple() __snake_case = fx_model(**lowercase_).to_tuple() self.assertEqual(len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(lowercase_ , lowercase_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowercase_) __snake_case = pt_model_class.from_pretrained(lowercase_ , from_flax=lowercase_) with torch.no_grad(): __snake_case = 
pt_model_loaded(**lowercase_).to_tuple() self.assertEqual( len(lowercase_) , len(lowercase_) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(lowercase_ , lowercase_): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2) @tooslow def _a ( self) -> List[Any]: for model_class_name in self.all_model_classes: __snake_case = model_class_name.from_pretrained('EleutherAI/gpt-j-6B') __snake_case = model(np.ones((1, 1))) self.assertIsNotNone(lowercase_)
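# Hedged sketch (not part of the test file above) of the cache pattern the
# use-cache tests exercise: pre-allocate the KV cache with init_cache, run the
# prompt once, then decode one token at a time. The checkpoint is the one the
# slow test loads (it is very large); everything else is illustrative.
#
# import jax.numpy as jnp
# from transformers import FlaxGPTJForCausalLM
#
# model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
# max_len = 20
# input_ids = jnp.ones((1, 8), dtype="i4")
# attention_mask = jnp.ones((1, max_len), dtype="i4")  # padded to max_len
# past = model.init_cache(input_ids.shape[0], max_len)
# position_ids = jnp.arange(input_ids.shape[-1])[None, :]
# out = model(input_ids, attention_mask=attention_mask,
#             past_key_values=past, position_ids=position_ids)
# # out.past_key_values now holds the filled cache for the next one-token step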
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple: super().__init__(*lowercase_ , **lowercase_) __snake_case = eval_examples __snake_case = post_process_function def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]: __snake_case = gen_kwargs.copy() __snake_case = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) __snake_case = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) __snake_case = gen_kwargs __snake_case = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case = self.get_eval_dataloader(lowercase_) __snake_case = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_) __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) else: __snake_case = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowercase_) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) __snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_) return metrics def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]: __snake_case = gen_kwargs.copy() __snake_case = self.get_test_dataloader(lowercase_) # Temporarily disable metric computation, we will do it in the loop here. 
__snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict') __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
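# Hedged usage sketch for the trainer subclass above (its real name is hidden
# by the mangling, so `MySeq2SeqTrainer` is a placeholder). Grounded in the
# code: evaluate()/predict() forward max_length and num_beams through
# gen_kwargs, and post_process_function pairs raw examples with predictions
# before compute_metrics runs.
#
# trainer = MySeq2SeqTrainer(
#     model=model,
#     args=training_args,
#     eval_examples=raw_eval_examples,
#     post_process_function=post_processing_function,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")
# preds = trainer.predict(test_dataset, raw_test_examples, metric_key_prefix="test")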
from __future__ import annotations

DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]  # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
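# A tiny self-contained check of the search above: on an obstacle-free 2x3
# grid with an all-zero heuristic, A* degenerates to uniform-cost search and
# must still reach the goal.
if __name__ == "__main__":
    tiny_grid = [[0, 0, 0], [0, 0, 0]]
    tiny_heuristic = [[0, 0, 0], [0, 0, 0]]
    tiny_path, _ = search(tiny_grid, [0, 0], [1, 2], 1, tiny_heuristic)
    assert tiny_path[0] == [0, 0] and tiny_path[-1] == [1, 2]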
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
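# Hedged usage sketch: these re-exports are typically consumed as below. The
# checkpoint ids are assumptions, not taken from this file.
#
# from diffusers import KandinskyPriorPipeline, KandinskyPipeline
#
# prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
# pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
# image_embeds, negative_image_embeds = prior("a red cat").to_tuple()
# image = pipe("a red cat", image_embeds=image_embeds,
#              negative_image_embeds=negative_image_embeds).images[0]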
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase__ : Any = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class __lowercase ( unittest.TestCase ): def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict: __snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))] if identifier is not None: __snake_case = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_): for n_ in n_identifier: __snake_case = [file for file in files if n_ not in file] else: __snake_case = [file for file in files if n_identifier not in file] __snake_case = ignore_files or [] ignore_files.append('__init__.py') __snake_case = [file for file in files if file not in ignore_files] for file in files: # Open all files print('Testing' , lowercase_) if only_modules: __snake_case = file.split('.')[0] try: __snake_case = getattr(lowercase_ , lowercase_) __snake_case = doctest.DocTestSuite(lowercase_) __snake_case = unittest.TextTestRunner().run(lowercase_) self.assertIs(len(result.failures) , 0) except AttributeError: logger.info(F"{module_identifier} is not a module.") else: __snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS) self.assertIs(result.failed , 0) def _a ( self) -> str: __snake_case = Path('src/transformers') __snake_case = 'modeling' __snake_case = [ 'modeling_ctrl.py', 'modeling_tf_ctrl.py', ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_) def _a ( self) -> Optional[Any]: __snake_case = Path('src/transformers') __snake_case = 'tokenization' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> List[str]: __snake_case = Path('src/transformers') __snake_case = 'configuration' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('src/transformers') __snake_case = ['configuration', 'modeling', 'tokenization'] self.analyze_directory(lowercase_ , n_identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('docs/source') __snake_case = ['favicon.ico'] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
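# Self-contained illustration of the doctest machinery the analyzer above
# drives per module/file (stdlib only; the function here is a stand-in):
import doctest


def _add(a: int, b: int) -> int:
    """
    >>> _add(2, 3)
    5
    """
    return a + b


if __name__ == "__main__":
    assert doctest.testmod().failed == 0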
def fizz_buzz(number: int, iterations: int) -> str:
    '''simple docstring'''
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += ' '
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
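# Quick cross-check of the three implementations above on small instances;
# they all count ordered combinations, so they must agree.
if __name__ == "__main__":
    for t in range(10):
        assert (
            combination_sum_iv(3, [1, 2, 5], t)
            == combination_sum_iv_dp_array(3, [1, 2, 5], t)
            == combination_sum_iv_bottom_up(3, [1, 2, 5], t)
        )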
def perfect_cube(n: int) -> bool:
    '''simple docstring'''
    # round the floating-point cube root before comparing; n ** (1 / 3) is
    # inexact, so a direct product check can wrongly reject true cubes
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
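# Why the rounding above matters: cube roots are inexact in binary floats, so
# the naive product test can reject true cubes (values are what CPython prints
# on a typical IEEE-754 platform).
if __name__ == "__main__":
    val = 27 ** (1 / 3)
    print(val)  # 3.0000000000000004, not 3.0
    print(val * val * val == 27)  # False without rounding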
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> List[str]: __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]}) return dset def _a ( self) -> Optional[int]: import faiss __snake_case = self._create_dummy_dataset() __snake_case = dset.map( lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_) __snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') dset.drop_index('vecs') def _a ( self) -> str: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> int: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name) dset.load_faiss_index('vecs2' , tmp_file.name) os.unlink(tmp_file.name) __snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> List[Any]: __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs') dset.drop_index('vecs') self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa))) def _a ( self) -> Any: from elasticsearch import Elasticsearch __snake_case = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 3_0) __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}} __snake_case = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=lowercase_) __snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29') self.assertEqual(examples['filename'][0] , 'my_name-train_29') @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[int]: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal , 5) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa)) self.assertEqual(index.faiss_index.ntotal , 1_0) # single query __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1)) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) # batched queries __snake_case = np.eye(5 , dtype=np.floataa)[::-1] __snake_case , __snake_case = index.search_batch(lowercase_) self.assertRaises(lowercase_ , index.search_batch , queries[0]) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([4, 3, 2, 1, 0] , lowercase_) def _a ( self) -> str: import faiss __snake_case = FaissIndex(string_factory='Flat') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) __snake_case = FaissIndex(string_factory='LSH') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexLSH) with self.assertRaises(lowercase_): __snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5)) def _a ( self) -> Optional[int]: import faiss __snake_case = faiss.IndexFlat(5) __snake_case = FaissIndex(custom_index=lowercase_) index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) def _a ( self) -> Tuple: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: index.save(tmp_file.name) __snake_case = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) @require_faiss def A ( snake_case__ : List[str] ) -> List[Any]: '''simple docstring''' import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __snake_case = 'index.faiss' __snake_case = f"mock://{index_name}" index.save(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = np.zeros(5 , dtype=np.floataa ) __snake_case = 1 __snake_case , __snake_case = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[Any]: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = Elasticsearch() __snake_case = {'acknowledged': True} __snake_case = ElasticSearchIndex(es_client=lowercase_) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(['foo', 'bar', 'foobar']) # single query __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # single query with timeout __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # batched queries __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_) # batched queries with timeout __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_)
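# Hedged, minimal use of datasets' FaissIndex outside the test harness,
# mirroring the calls exercised above (`np.floataa` in the mangled tests
# stands for float32):
import numpy as np


def faiss_index_smoke_test():
    import faiss
    from datasets.search import FaissIndex

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert indices[0] == 1 and scores[0] > 0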
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class __lowercase ( unittest.TestCase ): def _a ( self) -> List[Any]: __snake_case = tf.convert_to_tensor( [ [ 8.222_0991, # 3rd highest value; idx. 0 -0.562_0044, 5.2322_9752, 4.038_6393, -6.879_8378, -0.5478_5802, -3.201_2153, 2.9277_7176, 1.8817_1953, 7.3534_1276, # 5th highest value; idx. 9 8.4320_7833, # 2nd highest value; idx. 10 -9.8571_1836, -5.9620_9236, -1.1303_9161, -7.111_5294, -0.836_9633, -5.318_6408, 7.0642_7407, 0.8136_9344, -0.8202_3817, -5.917_9796, 0.5881_3443, -6.9977_8438, 4.7155_1189, -0.1877_1637, 7.4402_0759, # 4th highest value; idx. 25 9.3845_0987, # 1st highest value; idx. 26 2.1266_2941, -9.3256_2038, 2.3565_2522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5842_5518, 4.5313_9238, -5.5751_0464, -6.2803_0699, -7.1952_9503, -4.0212_2551, 1.3933_7037, -6.0670_7057, 1.5948_0517, -9.64_3119, 0.0390_7799, 0.6723_1762, -8.8820_6726, 6.2711_5922, # 4th highest value; idx. 13 2.2852_0723, 4.8276_7506, 4.3042_1368, 8.827_5313, # 2nd highest value; idx. 17 5.4402_9958, # 5th highest value; idx. 18 -4.473_5794, 7.3857_9536, # 3rd highest value; idx. 20 -2.9105_1663, 2.6194_6077, -2.567_4762, -9.4895_9302, -4.0292_2645, -1.3541_6918, 9.6770_2323, # 1st highest value; idx. 
27 -5.8947_8553, 1.8537_0467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) __snake_case = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above __snake_case = tf.convert_to_tensor( [8.22_2099, 7.353_4126, 8.43_2078, 7.440_2075, 9.3_8451, 6.27_1159, 8.82_7531, 5.440_2995, 7.385_7956, 9.67_7023] , dtype=tf.floataa , ) # expected non filtered values as noted above __snake_case = tf_top_k_top_p_filtering(lowercase_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4) __snake_case = output[output != -float('inf')] __snake_case = tf.cast( tf.where(tf.not_equal(lowercase_ , tf.constant(-float('inf') , dtype=tf.floataa))) , dtype=tf.intaa , ) tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1e-12) tf.debugging.assert_equal(lowercase_ , lowercase_) @require_tf class __lowercase ( unittest.TestCase , lowerCamelCase__ ): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_tf_available(): __UpperCAmelCase = { '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def _a ( self) -> Optional[Any]: # TF-only test: tf.saved_model export __snake_case = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') __snake_case = 2 __snake_case = 2 class __lowercase ( tf.Module ): def __init__( self , lowercase_) -> List[str]: super(lowercase_ , self).__init__() __snake_case = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids'), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask'), ) , jit_compile=lowercase_ , ) def _a ( self , lowercase_ , lowercase_) -> Optional[Any]: __snake_case = self.model.generate( input_ids=lowercase_ , attention_mask=lowercase_ , max_new_tokens=lowercase_ , return_dict_in_generate=lowercase_ , ) return {"sequences": outputs["sequences"]} __snake_case = [[2, 0], [1_0_2, 1_0_3]] __snake_case = [[1, 0], [1, 1]] __snake_case = DummyModel(model=lowercase_) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(lowercase_ , lowercase_ , signatures={'serving_default': dummy_model.serving}) __snake_case = tf.saved_model.load(lowercase_).signatures['serving_default'] for batch_size in range(1 , len(lowercase_) + 1): __snake_case = { 'input_ids': tf.constant(dummy_input_ids[:batch_size]), 'attention_mask': tf.constant(dummy_attention_masks[:batch_size]), } __snake_case = serving_func(**lowercase_)['sequences'] __snake_case = test_model.generate(**lowercase_ , max_new_tokens=lowercase_) tf.debugging.assert_equal(lowercase_ , lowercase_) @slow def _a ( self) -> Any: # TF-only test: tf.saved_model export __snake_case = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') __snake_case = 1 __snake_case = 2 class __lowercase ( tf.Module ): def __init__( self , lowercase_) -> Union[str, Any]: super(lowercase_ , self).__init__() __snake_case = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids'), tf.TensorSpec((batch_size, None) , 
tf.intaa , name='attention_mask'), ) , jit_compile=lowercase_ , ) def _a ( self , lowercase_ , lowercase_) -> Dict: __snake_case = self.model.generate( input_ids=lowercase_ , attention_mask=lowercase_ , max_new_tokens=lowercase_ , return_dict_in_generate=lowercase_ , ) return {"sequences": outputs["sequences"]} __snake_case = [[2], [1_0_2, 1_0_3]] __snake_case = [[1], [1, 1]] __snake_case = DummyModel(model=lowercase_) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(lowercase_ , lowercase_ , signatures={'serving_default': dummy_model.serving}) __snake_case = tf.saved_model.load(lowercase_).signatures['serving_default'] for input_row in range(len(lowercase_)): __snake_case = { 'input_ids': tf.constant([dummy_input_ids[input_row]]), 'attention_mask': tf.constant([dummy_attention_masks[input_row]]), } __snake_case = serving_func(**lowercase_)['sequences'] __snake_case = test_model.generate(**lowercase_ , max_new_tokens=lowercase_) tf.debugging.assert_equal(lowercase_ , lowercase_) @slow @require_tensorflow_text def _a ( self) -> Optional[int]: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=lowercase_) class __lowercase ( tf.keras.layers.Layer ): def __init__( self) -> Dict: super().__init__() __snake_case = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(lowercase_ , 'spiece.model') , 'rb').read()) __snake_case = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5') def _a ( self , lowercase_ , *lowercase_ , **lowercase_) -> Optional[Any]: __snake_case = self.tokenizer.tokenize(lowercase_) __snake_case , __snake_case = text.pad_model_inputs( lowercase_ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id) __snake_case = self.model.generate(input_ids=lowercase_ , attention_mask=lowercase_) return self.tokenizer.detokenize(lowercase_) __snake_case = CompleteSentenceTransformer() __snake_case = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs') __snake_case = complete_model(lowercase_) __snake_case = tf.keras.Model(lowercase_ , lowercase_) keras_model.save(lowercase_) def _a ( self) -> Any: # Has PT equivalent: this test relies on random sampling __snake_case = { 'do_sample': True, 'num_beams': 1, 'top_p': 0.7, 'top_k': 1_0, 'temperature': 0.7, } __snake_case = 1_4 __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2') __snake_case = 'Hello, my dog is cute and' __snake_case = tokenizer(lowercase_ , return_tensors='tf') __snake_case = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2') __snake_case = 6_3_8 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0'): tf.random.set_seed(0) __snake_case = model.generate(**lowercase_ , eos_token_id=lowercase_ , **lowercase_) self.assertTrue(expectation == len(generated_tokens[0])) __snake_case = [6_3_8, 1_9_8] with tf.device(':/CPU:0'): tf.random.set_seed(0) __snake_case = model.generate(**lowercase_ , eos_token_id=lowercase_ , **lowercase_) self.assertTrue(expectation == len(generated_tokens[0])) def _a ( self) -> Union[str, Any]: # Has PT equivalent: ample use of framework-specific code __snake_case = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart') __snake_case = 'Hugging Face is a technology company based in New York and Paris.' 
__snake_case = bart_tokenizer(lowercase_ , return_tensors='tf').input_ids __snake_case = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart') __snake_case = bart_model.generate(lowercase_).numpy() class __lowercase ( lowerCamelCase__ ): def _a ( self , lowercase_ , lowercase_=None , **lowercase_) -> Dict: return super().call(lowercase_ , **lowercase_) __snake_case = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart') __snake_case = bart_model.generate(lowercase_ , foo='bar').numpy() self.assertTrue(np.array_equal(lowercase_ , lowercase_)) class __lowercase ( bart_model.model.encoder.__class__ ): def _a ( self , lowercase_ , **lowercase_) -> Optional[Any]: return super().call(lowercase_ , **lowercase_) __snake_case = FakeEncoder(bart_model.config , bart_model.model.shared) __snake_case = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) __snake_case = bart_model.generate(lowercase_).numpy() with self.assertRaises(lowercase_): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(lowercase_ , foo='bar')
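# Small hedged illustration of the filtering utility the first test above
# exercises: entries outside the kept top-k/top-p set are replaced by -inf.
import tensorflow as tf
from transformers import tf_top_k_top_p_filtering

logits = tf.constant([[1.0, 2.0, 3.0, 4.0]])
filtered = tf_top_k_top_p_filtering(logits, top_k=2)
# filtered[0] == [-inf, -inf, 3.0, 4.0]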
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]: '''simple docstring''' __snake_case = args.log_outputs __snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric __snake_case = load_metric('wer' ) __snake_case = load_metric('cer' ) # compute metrics __snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] ) __snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results __snake_case = f"WER: {wer_result}\nCER: {cer_result}" print(snake_case__ ) with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f: f.write(snake_case__ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __snake_case = f"log_{dataset_id}_predictions.txt" __snake_case = f"log_{dataset_id}_targets.txt" with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t: # mapping function to write output def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ): p.write(f"{i}" + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f"{i}" + '\n' ) t.write(batch['target'] + '\n' ) result.map(snake_case__ , with_indices=snake_case__ ) def A ( snake_case__ : str ) -> str: '''simple docstring''' __snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __snake_case = re.sub(snake_case__ , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! __snake_case = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: __snake_case = ' '.join(text.split(snake_case__ ) ) return text def A ( snake_case__ : int ) -> Optional[int]: '''simple docstring''' # load dataset __snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __snake_case = AutoFeatureExtractor.from_pretrained(args.model_id ) __snake_case = feature_extractor.sampling_rate # resample audio __snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) ) # load eval pipeline if args.device is None: __snake_case = 0 if torch.cuda.is_available() else -1 __snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case__ : Optional[Any] ): __snake_case = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __snake_case = prediction['text'] __snake_case = normalize_text(batch['sentence'] ) return batch # run inference on all examples __snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case__ , snake_case__ ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) UpperCAmelCase__ : str = parser.parse_args() main(args)
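# Hedged example invocation (the script filename and the model/dataset ids
# are assumptions; the flags are exactly the ones the argparse block above
# defines):
#
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 \
#       --config en \
#       --split test \
#       --chunk_length_s 5.0 \
#       --stride_length_s 1.0 \
#       --log_outputs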
import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) def A ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : str=False ) -> Dict: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise if not is_sharded: __snake_case = os.path.abspath(snake_case__ ) logger.info(f"Loading PyTorch weights from {pt_path}" ) __snake_case = torch.load(snake_case__ , map_location='cpu' ) logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." ) __snake_case = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files __snake_case = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ ) return flax_state_dict def A ( snake_case__ : Tuple[str] , snake_case__ : np.ndarray , snake_case__ : Dict[str, jnp.ndarray] , snake_case__ : str , ) -> (Tuple[str], np.ndarray): '''simple docstring''' def is_key_or_prefix_key_in_dict(snake_case__ : Tuple[str] ) -> bool: return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0 # layer norm __snake_case = pt_tuple_key[:-1] + ('scale',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean __snake_case = pt_tuple_key[:-1] + ('mean',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var __snake_case = pt_tuple_key[:-1] + ('var',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # embedding __snake_case = pt_tuple_key[:-1] + ('embedding',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ): return renamed_pt_tuple_key, pt_tensor # conv layer __snake_case = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ): __snake_case = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer __snake_case = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ): __snake_case = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight __snake_case = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias __snake_case = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 __snake_case = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): __snake_case = pt_tuple_key[-2] + '_g' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): __snake_case = pt_tuple_key[-2] + '_v' if name is not None: __snake_case = 
pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def A ( snake_case__ : List[Any] , snake_case__ : Dict ) -> Any: '''simple docstring''' # convert pytorch tensor to numpy __snake_case = {k: v.numpy() for k, v in pt_state_dict.items()} __snake_case = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: __snake_case = flax_model.params['params'] else: __snake_case = flax_model.params __snake_case = flatten_dict(snake_case__ ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: __snake_case = flatten_dict(flax_model.params['batch_stats'] ) random_flax_state_dict.update(snake_case__ ) __snake_case = {} __snake_case = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()} ) __snake_case = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __snake_case = tuple(pt_key.split('.' ) ) # remove base model prefix if necessary __snake_case = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: __snake_case = pt_tuple_key[1:] # Correctly rename weight parameters __snake_case , __snake_case = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary __snake_case = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: __snake_case = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: __snake_case = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown __snake_case = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown __snake_case = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ) -> Dict: '''simple docstring''' import torch # Load the index __snake_case = {} for shard_file in shard_filenames: # load using msgpack utils __snake_case = torch.load(snake_case__ ) __snake_case = {k: v.numpy() for k, v in pt_state_dict.items()} __snake_case = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: __snake_case = flax_model.params['params'] __snake_case = flatten_dict(snake_case__ ) random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) ) else: __snake_case = flax_model.params __snake_case = flatten_dict(snake_case__ ) __snake_case = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()} ) __snake_case = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('.' 
)[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __snake_case = tuple(pt_key.split('.' ) ) # remove base model prefix if necessary __snake_case = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: __snake_case = pt_tuple_key[1:] # Correctly rename weight parameters __snake_case , __snake_case = rename_key_and_reshape_tensor( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # add model prefix if necessary __snake_case = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: __snake_case = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: __snake_case = jnp.asarray(snake_case__ ) continue if "var" in flax_key[-1]: __snake_case = jnp.asarray(snake_case__ ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(snake_case__ , snake_case__ ) continue # also add unexpected weight so that warning is thrown __snake_case = jnp.asarray(snake_case__ ) else: # also add unexpected weight so that warning is thrown __snake_case = jnp.asarray(snake_case__ ) return unflatten_dict(snake_case__ ) def A ( snake_case__ : Optional[int] , snake_case__ : Optional[Any] ) -> int: '''simple docstring''' __snake_case = os.path.abspath(snake_case__ ) logger.info(f"Loading Flax weights from {flax_checkpoint_path}" ) # import correct flax class __snake_case = getattr(snake_case__ , 'Flax' + model.__class__.__name__ ) # load flax weight dict with open(snake_case__ , 'rb' ) as state_f: try: __snake_case = from_bytes(snake_case__ , state_f.read() ) except UnpicklingError: raise EnvironmentError(f"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " ) return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) def A ( snake_case__ : List[str] , snake_case__ : str ) -> Any: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise # check if we have bf16 weights __snake_case = flatten_dict(jax.tree_util.tree_map(lambda snake_case__ : x.dtype == jnp.bfloataa , snake_case__ ) ).values() if any(snake_case__ ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( 'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` ' 'before loading those in PyTorch model.' ) __snake_case = jax.tree_util.tree_map( lambda snake_case__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , snake_case__ ) __snake_case = flatten_dict(snake_case__ ) __snake_case = pt_model.state_dict() __snake_case = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('.' 
)[0] for k in pt_model_dict.keys()} ) __snake_case = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys __snake_case = [] __snake_case = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): __snake_case = flax_key_tuple[0] == pt_model.base_model_prefix __snake_case = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: __snake_case = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: __snake_case = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict: # conv layer __snake_case = flax_key_tuple[:-1] + ('weight',) __snake_case = jnp.transpose(snake_case__ , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict: # linear layer __snake_case = flax_key_tuple[:-1] + ('weight',) __snake_case = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: __snake_case = flax_key_tuple[:-1] + ('weight',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: __snake_case = flax_key_tuple[:-1] + ('running_mean',) elif "var" in flax_key_tuple[-1]: __snake_case = flax_key_tuple[:-1] + ('running_var',) if "batch_stats" in flax_state: __snake_case = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: __snake_case = '.'.join(snake_case__ ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. __snake_case = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: __snake_case = key.split('.' ) __snake_case = None if key_components[-3::2] == ["parametrizations", "original0"]: __snake_case = key_components[-2] + '_g' elif key_components[-3::2] == ["parametrizations", "original1"]: __snake_case = key_components[-2] + '_v' if name is not None: __snake_case = key_components[:-3] + [name] __snake_case = '.'.join(snake_case__ ) __snake_case = key if flax_key in special_pt_names: __snake_case = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected " f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." ) else: # add weight to pytorch dict __snake_case = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor __snake_case = torch.from_numpy(snake_case__ ) # remove from missing keys missing_keys.remove(snake_case__ ) else: # weight is not expected by PyTorch model unexpected_keys.append(snake_case__ ) pt_model.load_state_dict(snake_case__ ) # re-transform missing_keys to list __snake_case = list(snake_case__ ) if len(snake_case__ ) > 0: logger.warning( 'Some weights of the Flax model were not used when initializing the PyTorch model' f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing" f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture" ' (e.g. 
initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This' f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect" ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a' ' FlaxBertForSequenceClassification model).' ) else: logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" ) if len(snake_case__ ) > 0: logger.warning( f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly" f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to" ' use it for predictions and inference.' ) else: logger.warning( f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n" 'If your task is similar to the task the model of the checkpoint was trained on, ' f"you can already use {pt_model.__class__.__name__} for predictions without further training." ) return pt_model
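# Hedged round-trip sketch for the converters above. Judging by their internal
# call sites, the mangled `A` definitions correspond to
# convert_pytorch_state_dict_to_flax and load_flax_weights_in_pytorch_model;
# the checkpoint name is an assumption.
#
# from transformers import BertModel, FlaxBertModel
#
# pt_model = BertModel.from_pretrained("bert-base-cased")
# fx_model = FlaxBertModel.from_pretrained("bert-base-cased")
# flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
# pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)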
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def A ( *snake_case__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' with open(snake_case__ , 'r' ) as fh: fcntl.flock(snake_case__ , fcntl.LOCK_EX ) try: print(*snake_case__ ) finally: fcntl.flock(snake_case__ , fcntl.LOCK_UN ) UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) UpperCAmelCase__ : Any = torch.device("cuda", local_rank) UpperCAmelCase__ : Union[str, Any] = socket.gethostname() UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank UpperCAmelCase__ : Optional[int] = dist.get_rank() UpperCAmelCase__ : List[str] = dist.get_world_size() printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(F"""{gpu} is broken""") raise
676
1
import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict UpperCAmelCase__ : Optional[int] = namedtuple( "_TestCommandArgs", [ "dataset", "name", "cache_dir", "data_dir", "all_configs", "save_infos", "ignore_verifications", "force_redownload", "clear_cache", ], defaults=[None, None, None, False, False, False, False, False], ) def A ( snake_case__ : Tuple , snake_case__ : List[str] ) -> Tuple: '''simple docstring''' return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def A ( snake_case__ : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case = _TestCommandArgs(dataset=snake_case__ , all_configs=snake_case__ , save_infos=snake_case__ ) __snake_case = TestCommand(*snake_case__ ) test_command.run() __snake_case = os.path.join(snake_case__ , 'README.md' ) assert os.path.exists(snake_case__ ) __snake_case = DatasetInfosDict.from_directory(snake_case__ ) __snake_case = DatasetInfosDict( { 'default': DatasetInfo( features=Features( { 'tokens': Sequence(Value('string' ) ), 'ner_tags': Sequence( ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ), 'langs': Sequence(Value('string' ) ), 'spans': Sequence(Value('string' ) ), } ) , splits=[ { 'name': 'train', 'num_bytes': 235_1563, 'num_examples': 1_0000, }, { 'name': 'validation', 'num_bytes': 23_8418, 'num_examples': 1000, }, ] , download_size=394_0680 , dataset_size=258_9981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: __snake_case , __snake_case = getattr(dataset_infos['default'] , snake_case__ ), getattr(expected_dataset_infos['default'] , snake_case__ ) if key == "num_bytes": assert is_apercent_close(snake_case__ , snake_case__ ) elif key == "splits": assert list(snake_case__ ) == list(snake_case__ ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: assert result == expected
676
from datetime import datetime import requests def A ( snake_case__ : str ) -> bytes: '''simple docstring''' __snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url=' __snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src'] return requests.get(snake_case__ ).content if __name__ == "__main__": UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip() UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F"""Done. Video saved to disk as {file_name}.""")
676
1
import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class __lowercase ( unittest.TestCase ): def _a ( self) -> Dict: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _a ( self) -> List[str]: __snake_case = 1 __snake_case = 3 __snake_case = (3_2, 3_2) __snake_case = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(lowercase_) return image @property def _a ( self) -> Optional[Any]: torch.manual_seed(0) __snake_case = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) return model @property def _a ( self) -> Union[str, Any]: torch.manual_seed(0) __snake_case = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def _a ( self) -> Tuple: torch.manual_seed(0) __snake_case = RobertaSeriesConfig( hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , ) return RobertaSeriesModelWithTransformation(lowercase_) @property def _a ( self) -> Dict: def extract(*lowercase_ , **lowercase_): class __lowercase : def __init__( self) -> Union[str, Any]: __snake_case = torch.ones([0]) def _a ( self , lowercase_) -> Dict: self.pixel_values.to(lowercase_) return self return Out() return extract def _a ( self) -> Tuple: __snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator __snake_case = self.dummy_cond_unet __snake_case = PNDMScheduler(skip_prk_steps=lowercase_) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta') __snake_case = 7_7 __snake_case = self.dummy_image.to(lowercase_) __snake_case = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk __snake_case = AltDiffusionImgaImgPipeline( unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , ) __snake_case = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_) __snake_case = alt_pipe.to(lowercase_) alt_pipe.set_progress_bar_config(disable=lowercase_) __snake_case = 'A painting of a squirrel eating a burger' __snake_case = torch.Generator(device=lowercase_).manual_seed(0) __snake_case = alt_pipe( [prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=lowercase_ , ) __snake_case = output.images __snake_case = torch.Generator(device=lowercase_).manual_seed(0) __snake_case = 
alt_pipe( [prompt] , generator=lowercase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=lowercase_ , return_dict=lowercase_ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __snake_case = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def _a ( self) -> int: __snake_case = self.dummy_cond_unet __snake_case = PNDMScheduler(skip_prk_steps=lowercase_) __snake_case = self.dummy_vae __snake_case = self.dummy_text_encoder __snake_case = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta') __snake_case = 7_7 __snake_case = self.dummy_image.to(lowercase_) # put models in fp16 __snake_case = unet.half() __snake_case = vae.half() __snake_case = bert.half() # make sure here that pndm scheduler skips prk __snake_case = AltDiffusionImgaImgPipeline( unet=lowercase_ , scheduler=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , safety_checker=lowercase_ , feature_extractor=self.dummy_extractor , ) __snake_case = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowercase_) __snake_case = alt_pipe.to(lowercase_) alt_pipe.set_progress_bar_config(disable=lowercase_) __snake_case = 'A painting of a squirrel eating a burger' __snake_case = torch.manual_seed(0) __snake_case = alt_pipe( [prompt] , generator=lowercase_ , num_inference_steps=2 , output_type='np' , image=lowercase_ , ).images assert image.shape == (1, 3_2, 3_2, 3) @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU') def _a ( self) -> Any: __snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') # resize to resolution that is divisible by 8 but not 16 or 32 __snake_case = init_image.resize((7_6_0, 5_0_4)) __snake_case = 'BAAI/AltDiffusion' __snake_case = AltDiffusionImgaImgPipeline.from_pretrained( lowercase_ , safety_checker=lowercase_ , ) pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() __snake_case = 'A fantasy landscape, trending on artstation' __snake_case = torch.manual_seed(0) __snake_case = pipe( prompt=lowercase_ , image=lowercase_ , strength=0.75 , guidance_scale=7.5 , generator=lowercase_ , output_type='np' , ) __snake_case = output.images[0] __snake_case = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 7_6_0, 3) __snake_case = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class __lowercase ( unittest.TestCase ): def _a ( self) -> Optional[int]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _a ( self) -> Tuple: __snake_case = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') __snake_case = init_image.resize((7_6_8, 5_1_2)) __snake_case = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy') __snake_case = 'BAAI/AltDiffusion' __snake_case = AltDiffusionImgaImgPipeline.from_pretrained( lowercase_ , 
safety_checker=lowercase_ , ) pipe.to(lowercase_) pipe.set_progress_bar_config(disable=lowercase_) pipe.enable_attention_slicing() __snake_case = 'A fantasy landscape, trending on artstation' __snake_case = torch.manual_seed(0) __snake_case = pipe( prompt=lowercase_ , image=lowercase_ , strength=0.75 , guidance_scale=7.5 , generator=lowercase_ , output_type='np' , ) __snake_case = output.images[0] assert image.shape == (5_1_2, 7_6_8, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image).max() < 1e-2
676
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __lowercase : def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]: __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def _a ( self) -> Union[str, Any]: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length]) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __snake_case = ids_tensor([self.batch_size] , self.num_choices) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , ) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]: __snake_case = OpenLlamaModel(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_) __snake_case = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , 
lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]: __snake_case = True __snake_case = OpenLlamaModel(lowercase_) model.to(lowercase_) model.eval() __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) __snake_case = model(lowercase_ , attention_mask=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str: __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]: __snake_case = True __snake_case = True __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() # first forward pass __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) __snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size) __snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1) __snake_case = torch.cat([input_mask, next_mask] , dim=-1) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3)) def _a ( self) -> Optional[Any]: __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else () __UpperCAmelCase = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if 
is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def _a ( self) -> Tuple: __snake_case = OpenLlamaModelTester(self) __snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7) def _a ( self) -> int: self.config_tester.run_common_tests() def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'single_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> int: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'multi_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def _a ( self) -> List[Any]: pass @parameterized.expand([('linear',), ('dynamic',)]) def _a ( self , lowercase_) -> Optional[Any]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = ids_tensor([1, 1_0] , config.vocab_size) __snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = OpenLlamaModel(lowercase_) original_model.to(lowercase_) original_model.eval() __snake_case = original_model(lowercase_).last_hidden_state __snake_case = original_model(lowercase_).last_hidden_state set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = {'type': scaling_type, 'factor': 10.0} __snake_case = OpenLlamaModel(lowercase_) scaled_model.to(lowercase_) scaled_model.eval() 
__snake_case = scaled_model(lowercase_).last_hidden_state __snake_case = scaled_model(lowercase_).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
676
1
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' def count_of_possible_combinations(snake_case__ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case__ ) def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' def count_of_possible_combinations_with_dp_array( snake_case__ : int , snake_case__ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] __snake_case = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case__ ) for item in array ) __snake_case = answer return answer __snake_case = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ ) def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' __snake_case = [0] * (target + 1) __snake_case = 1 for i in range(1 , target + 1 ): for j in range(snake_case__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ : str = 3 UpperCAmelCase__ : Optional[int] = 5 UpperCAmelCase__ : Tuple = [1, 2, 5] print(combination_sum_iv(n, array, target))
676
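A minimal, readable sketch of the bottom-up count in the snippet above, with identifiers of my own choosing (the snippet's three functions all share one obfuscated name): dp[i] holds the number of ordered ways to reach sum i, seeded with dp[0] = 1 for the empty selection.

def count_combinations(array: list[int], target: int) -> int:
    # dp[i] = number of ordered sequences drawn from `array` that sum to i
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to make 0: pick nothing
    for i in range(1, target + 1):
        for item in array:
            if i - item >= 0:
                dp[i] += dp[i - item]
    return dp[target]

assert count_combinations([1, 2, 5], 5) == 9  # matches the snippet's array=[1, 2, 5], target=5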
def A ( snake_case__ : int ) -> bool: '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): __snake_case = f"Input value of [number={number}] must be an integer" raise TypeError(snake_case__ ) if number < 0: return False __snake_case = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
676
1
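For orientation (my description, not in the source): the digit-by-digit check two rows up returns True exactly when number * number ends in the digits of number, i.e. when the number is automorphic. A quick brute-force cross-check against the known automorphic numbers below 1000:

def is_automorphic(n: int) -> bool:
    # n is automorphic when n*n ends in the digits of n, e.g. 76 * 76 = 5776
    square = n * n
    while n > 0:
        if n % 10 != square % 10:
            return False
        n //= 10
        square //= 10
    return True

print([n for n in range(1, 1000) if is_automorphic(n)])  # [1, 5, 6, 25, 76, 376, 625]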
import os import unittest from transformers import LxmertTokenizer, LxmertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowercase ( lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = LxmertTokenizer __UpperCAmelCase = LxmertTokenizerFast __UpperCAmelCase = True __UpperCAmelCase = True def _a ( self) -> List[str]: super().setUp() __snake_case = [ '[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) def _a ( self , lowercase_) -> List[str]: __snake_case = 'UNwant\u00E9d,running' __snake_case = 'unwanted, running' return input_text, output_text def _a ( self) -> Optional[int]: __snake_case = self.tokenizer_class(self.vocab_file) __snake_case = tokenizer.tokenize('UNwant\u00E9d,running') self.assertListEqual(lowercase_ , ['un', '##want', '##ed', ',', 'runn', '##ing']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase_) , [7, 4, 5, 1_0, 8, 9]) def _a ( self) -> Optional[Any]: if not self.test_rust_tokenizer: return __snake_case = self.get_tokenizer() __snake_case = self.get_rust_tokenizer() __snake_case = 'I was born in 92000, and this is falsé.' __snake_case = tokenizer.tokenize(lowercase_) __snake_case = rust_tokenizer.tokenize(lowercase_) self.assertListEqual(lowercase_ , lowercase_) __snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) __snake_case = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) self.assertListEqual(lowercase_ , lowercase_) __snake_case = self.get_rust_tokenizer() __snake_case = tokenizer.encode(lowercase_) __snake_case = rust_tokenizer.encode(lowercase_) self.assertListEqual(lowercase_ , lowercase_)
676
import numpy as np def A ( snake_case__ : np.ndarray ) -> np.ndarray: '''simple docstring''' return 1 / (1 + np.exp(-vector )) def A ( snake_case__ : np.ndarray ) -> np.ndarray: '''simple docstring''' return vector * sigmoid(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
676
1
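The second NumPy helper above is the SiLU (swish) activation x * sigmoid(x); a tiny numeric sanity check with values of my own choosing:

import numpy as np

v = np.array([-2.0, 0.0, 2.0])
sigmoid = 1 / (1 + np.exp(-v))
silu = v * sigmoid
print(np.round(sigmoid, 4))  # [0.1192 0.5    0.8808]
print(np.round(silu, 4))     # [-0.2384  0.      1.7616]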
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def A ( snake_case__ : str , snake_case__ : str , snake_case__ : str ) -> Union[str, Any]: '''simple docstring''' def get_masked_lm_array(snake_case__ : str ): __snake_case = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) def get_encoder_array(snake_case__ : str ): __snake_case = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) def get_encoder_layer_array(snake_case__ : int , snake_case__ : str ): __snake_case = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) def get_encoder_attention_layer_array(snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] ): __snake_case = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) __snake_case = array.reshape(snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) print(f"Loading model based on config from {config_path}..." 
) __snake_case = BertConfig.from_json_file(snake_case__ ) __snake_case = BertForMaskedLM(snake_case__ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case = model.bert.encoder.layer[layer_index] # Self-attention __snake_case = layer.attention.self __snake_case = get_encoder_attention_layer_array( snake_case__ , '_query_dense/kernel' , self_attn.query.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_query_dense/bias' , self_attn.query.bias.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_key_dense/kernel' , self_attn.key.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_key_dense/bias' , self_attn.key.bias.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_value_dense/kernel' , self_attn.value.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case = layer.attention.output __snake_case = get_encoder_attention_layer_array( snake_case__ , '_output_dense/kernel' , self_output.dense.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_output_dense/bias' , self_output.dense.bias.data.shape ) __snake_case = get_encoder_layer_array(snake_case__ , '_attention_layer_norm/gamma' ) __snake_case = get_encoder_layer_array(snake_case__ , '_attention_layer_norm/beta' ) # Intermediate __snake_case = layer.intermediate __snake_case = get_encoder_layer_array(snake_case__ , '_intermediate_dense/kernel' ) __snake_case = get_encoder_layer_array(snake_case__ , '_intermediate_dense/bias' ) # Output __snake_case = layer.output __snake_case = get_encoder_layer_array(snake_case__ , '_output_dense/kernel' ) __snake_case = get_encoder_layer_array(snake_case__ , '_output_dense/bias' ) __snake_case = get_encoder_layer_array(snake_case__ , '_output_layer_norm/gamma' ) __snake_case = get_encoder_layer_array(snake_case__ , '_output_layer_norm/beta' ) # Embeddings __snake_case = get_encoder_array('_position_embedding_layer/embeddings' ) __snake_case = get_encoder_array('_type_embedding_layer/embeddings' ) __snake_case = get_encoder_array('_embedding_norm_layer/gamma' ) __snake_case = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head __snake_case = model.cls.predictions.transform __snake_case = get_masked_lm_array('dense/kernel' ) __snake_case = get_masked_lm_array('dense/bias' ) __snake_case = get_masked_lm_array('layer_norm/gamma' ) __snake_case = get_masked_lm_array('layer_norm/beta' ) __snake_case = get_masked_lm_array('embedding_table' ) # Pooling __snake_case = BertPooler(config=snake_case__ ) __snake_case = get_encoder_array('_pooler_layer/kernel' ) __snake_case = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(snake_case__ ) # Integration test - should load without any errors ;) __snake_case = BertForMaskedLM.from_pretrained(snake_case__ ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": UpperCAmelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. 
This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
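The repeated array.transpose() for names containing 'kernel' in the converter above reflects a storage convention rather than any change to the model: TF dense kernels are laid out as (in_features, out_features), while torch.nn.Linear.weight is (out_features, in_features). A small NumPy illustration (array values are mine):

import numpy as np

tf_kernel = np.arange(6, dtype=np.float32).reshape(2, 3)  # TF layout: (in=2, out=3)
x = np.ones((1, 2), dtype=np.float32)

y_tf = x @ tf_kernel        # TF dense layer: y = x @ W
w_torch = tf_kernel.T       # what the converter stores: (out, in)
y_torch = x @ w_torch.T     # torch Linear computes: y = x @ W.T
assert np.allclose(y_tf, y_torch)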
def A ( snake_case__ : int ) -> bool: '''simple docstring''' if p < 2: raise ValueError('p should not be less than 2!' ) elif p == 2: return True __snake_case = 4 __snake_case = (1 << p) - 1 for _ in range(p - 2 ): __snake_case = ((s * s) - 2) % m return s == 0 if __name__ == "__main__": print(lucas_lehmer_test(7)) print(lucas_lehmer_test(11))
676
1
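A usage sketch for the Lucas-Lehmer test above (loop and naming are mine): for a prime exponent p it decides whether the Mersenne number 2**p - 1 is prime, so scanning small prime exponents recovers the first Mersenne primes.

def lucas_lehmer(p: int) -> bool:
    # True iff 2**p - 1 is prime; p == 2 is the one special case
    if p == 2:
        return True
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0

for p in (3, 5, 7, 11, 13):
    print(p, lucas_lehmer(p))  # False only at p = 11, since 2**11 - 1 = 2047 = 23 * 89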
from collections.abc import Sequence def A ( snake_case__ : Sequence[float] , snake_case__ : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(snake_case__ ) ) def A ( snake_case__ : Sequence[float] , snake_case__ : float ) -> float: '''simple docstring''' __snake_case = 0.0 for coeff in reversed(snake_case__ ): __snake_case = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase__ : Dict = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
676
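Both evaluators above compute the same value; Horner's rule simply refactors c0 + c1*x + c2*x**2 + ... as c0 + x*(c1 + x*(c2 + ...)), costing one multiply and one add per coefficient. A quick integer check (coefficients are mine, chosen so the equality is exact):

poly = (2, -3, 0, 5)  # represents 2 - 3x + 5x**3
x = 3
naive = sum(c * x**i for i, c in enumerate(poly))
horner = 0
for coeff in reversed(poly):
    horner = horner * x + coeff
assert naive == horner == 128  # 2 - 9 + 135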
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"] UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Any = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : int = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
def A ( snake_case__ : int ) -> list: '''simple docstring''' # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be non-negative' ) # get the generated string sequence __snake_case = gray_code_sequence_string(snake_case__ ) # # convert them to integers for i in range(len(snake_case__ ) ): __snake_case = int(sequence[i] , 2 ) return sequence def A ( snake_case__ : int ) -> list: '''simple docstring''' # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __snake_case = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __snake_case = gray_code_sequence_string(bit_count - 1 ) __snake_case = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __snake_case = '0' + smaller_sequence[i] sequence.append(snake_case__ ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __snake_case = '1' + smaller_sequence[i] sequence.append(snake_case__ ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
676
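The reflect-and-prefix recursion above builds the standard binary-reflected Gray code, which also has the closed form i XOR (i >> 1). A property check (my naming) that consecutive codes differ in exactly one bit:

def gray_sequence(bit_count: int) -> list[int]:
    # closed form of the binary-reflected Gray code
    return [i ^ (i >> 1) for i in range(1 << bit_count)]

codes = gray_sequence(3)
print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]
for a, b in zip(codes, codes[1:]):
    assert bin(a ^ b).count("1") == 1  # neighbours differ in one bit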
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
676
1
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = 42 __UpperCAmelCase = None def A ( snake_case__ : Dict , snake_case__ : List[str]=0.999 , snake_case__ : Tuple="cosine" , ) -> Optional[Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case__ : str ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case__ : str ): return math.exp(t * -12.0 ) else: raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}" ) __snake_case = [] for i in range(snake_case__ ): __snake_case = i / num_diffusion_timesteps __snake_case = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case__ ) / alpha_bar_fn(snake_case__ ) , snake_case__ ) ) return torch.tensor(snake_case__ , dtype=torch.floataa ) class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ): __UpperCAmelCase = 1 @register_to_config def __init__( self , lowercase_ = 1_0_0_0 , lowercase_ = 0.0001 , lowercase_ = 0.02 , lowercase_ = "linear" , lowercase_ = None , lowercase_ = True , lowercase_ = True , lowercase_ = 0 , lowercase_ = "epsilon" , lowercase_ = 1.0 , **lowercase_ , ) -> Any: if kwargs.get('set_alpha_to_one' , lowercase_) is not None: __snake_case = ( 'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.' ) deprecate('set_alpha_to_one' , '1.0.0' , lowercase_ , standard_warn=lowercase_) __snake_case = kwargs['set_alpha_to_one'] if trained_betas is not None: __snake_case = torch.tensor(lowercase_ , dtype=torch.floataa) elif beta_schedule == "linear": __snake_case = torch.linspace(lowercase_ , lowercase_ , lowercase_ , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __snake_case = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase_ , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __snake_case = betas_for_alpha_bar(lowercase_) else: raise NotImplementedError(F"{beta_schedule} is not implemented for {self.__class__}") __snake_case = 1.0 - self.betas __snake_case = torch.cumprod(self.alphas , dim=0) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one.
__snake_case = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution __snake_case = 1.0 # setable values __snake_case = None __snake_case = torch.from_numpy(np.arange(0 , lowercase_).copy().astype(np.intaa)) def _a ( self , lowercase_ , lowercase_ = None) -> torch.FloatTensor: return sample def _a ( self , lowercase_ , lowercase_ = None) -> Tuple: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:" F" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle" F" maximal {self.config.num_train_timesteps} timesteps.") __snake_case = num_inference_steps __snake_case = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __snake_case = (np.arange(0 , lowercase_) * step_ratio).round().copy().astype(np.intaa) __snake_case = torch.from_numpy(lowercase_).to(lowercase_) self.timesteps += self.config.steps_offset def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 0.0 , lowercase_ = False , lowercase_ = None , lowercase_ = True , ) -> Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) __snake_case = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process __snake_case = self.alphas_cumprod[timestep] __snake_case = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) __snake_case = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": __snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 __snake_case = model_output elif self.config.prediction_type == "sample": __snake_case = model_output __snake_case = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": __snake_case = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output __snake_case = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or" ' `v_prediction`') # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: __snake_case = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __snake_case = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowercase_ , pred_original_sample=lowercase_) def __len__( self) -> List[str]: return self.config.num_train_timesteps
676
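A plain-NumPy sketch (names mine) of the 'squaredcos_cap_v2' branch constructed above, using the same alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2: each beta_i is 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta so the final steps stay finite. (The scheduler itself additionally casts to float32.)

import math
import numpy as np

def cosine_betas(num_steps: int, max_beta: float = 0.999) -> np.ndarray:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]
    return np.asarray(betas)

betas = cosine_betas(1000)
print(betas[0], betas[-1])  # tiny at t = 0, capped at 0.999 by the end
assert 0 < betas[0] < betas[-1] <= 0.999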
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def A ( snake_case__ : List[Any] ) -> Any: '''simple docstring''' __snake_case = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __snake_case = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __snake_case = 4 __snake_case = 48 __snake_case = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __snake_case = [6, 6, 6, 6] __snake_case = 60 __snake_case = [6, 6, 6, 6] __snake_case = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __snake_case = 4 __snake_case = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __snake_case = 1 __snake_case = 1 __snake_case = 126 __snake_case = 7 __snake_case = 255.0 __snake_case = '' return config def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: __snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: __snake_case = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: __snake_case = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: __snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __snake_case = name.replace('attn' , 'attention.self' ) if "norm1" in name: __snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: __snake_case = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: __snake_case = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: __snake_case = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: __snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: __snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": __snake_case = 'layernorm.weight' if name == "norm.bias": __snake_case = 'layernorm.bias' if "conv_first" in name: __snake_case = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __snake_case = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: __snake_case = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: __snake_case = name.replace('upsample.2' , 'upsample.convolution_1' ) __snake_case = 'upsample.' 
+ name elif config.upsampler == "pixelshuffledirect": __snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) __snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: __snake_case = 'swin2sr.' + name return name def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict: '''simple docstring''' for key in orig_state_dict.copy().keys(): __snake_case = orig_state_dict.pop(snake_case__ ) if "qkv" in key: __snake_case = key.split('.' ) __snake_case = int(key_split[1] ) __snake_case = int(key_split[4] ) __snake_case = config.embed_dim if "weight" in key: __snake_case = val[:dim, :] __snake_case = val[dim : dim * 2, :] __snake_case = val[-dim:, :] else: __snake_case = val[:dim] __snake_case = val[dim : dim * 2] __snake_case = val[-dim:] pass else: __snake_case = val return orig_state_dict def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple: '''simple docstring''' __snake_case = get_config(snake_case__ ) __snake_case = SwinaSRForImageSuperResolution(snake_case__ ) model.eval() __snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' ) __snake_case = convert_state_dict(snake_case__ , snake_case__ ) __snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0: raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict" ) # verify values __snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' __snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) __snake_case = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __snake_case = 126 if 'Jpeg' in checkpoint_url else 256 __snake_case = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __snake_case = transforms(snake_case__ ).unsqueeze(0 ) if config.num_channels == 1: __snake_case = pixel_values[:, 0, :, :].unsqueeze(1 ) __snake_case = model(snake_case__ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 512, 512] ) __snake_case = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 512, 512] ) __snake_case = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == 
expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 ) print('Looks ok!' ) __snake_case = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } __snake_case = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(snake_case__ ) if push_to_hub: model.push_to_hub(f"caidas/{model_name}" ) processor.push_to_hub(f"caidas/{model_name}" ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") UpperCAmelCase__ : Optional[Any] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
676
1
import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename UpperCAmelCase__ : Any = "http://www.mocksite.com/file1.txt" UpperCAmelCase__ : Dict = "\"text\": [\"foo\", \"foo\"]" UpperCAmelCase__ : Union[str, Any] = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8" class __lowercase : __UpperCAmelCase = 200 __UpperCAmelCase = {'''Content-Length''': '''100'''} __UpperCAmelCase = {} def _a ( self , **lowercase_) -> str: return [bytes(lowercase_ , 'utf-8')] def A ( *snake_case__ : int , **snake_case__ : Tuple ) -> Union[str, Any]: '''simple docstring''' return MockResponse() @pytest.mark.parametrize('urls_type' , [str, list, dict] ) def A ( snake_case__ : Dict , snake_case__ : str , snake_case__ : int ) -> List[Any]: '''simple docstring''' import requests monkeypatch.setattr(snake_case__ , 'request' , snake_case__ ) __snake_case = URL if issubclass(snake_case__ , snake_case__ ): __snake_case = url elif issubclass(snake_case__ , snake_case__ ): __snake_case = [url] elif issubclass(snake_case__ , snake_case__ ): __snake_case = {'train': url} __snake_case = 'dummy' __snake_case = 'downloads' __snake_case = tmp_path __snake_case = DownloadConfig( cache_dir=os.path.join(snake_case__ , snake_case__ ) , use_etag=snake_case__ , ) __snake_case = DownloadManager(dataset_name=snake_case__ , download_config=snake_case__ ) __snake_case = dl_manager.download(snake_case__ ) __snake_case = urls for downloaded_paths in [downloaded_paths]: if isinstance(snake_case__ , snake_case__ ): __snake_case = [downloaded_paths] __snake_case = [urls] elif isinstance(snake_case__ , snake_case__ ): assert "train" in downloaded_paths.keys() __snake_case = downloaded_paths.values() __snake_case = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(snake_case__ , snake_case__ ): assert downloaded_path == dl_manager.downloaded_paths[input_url] __snake_case = Path(snake_case__ ) __snake_case = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() __snake_case = downloaded_path.read_text() assert content == CONTENT __snake_case = downloaded_path.with_suffix('.json' ) assert metadata_downloaded_path.exists() __snake_case = json.loads(metadata_downloaded_path.read_text() ) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize('paths_type' , [str, list, dict] ) def A ( snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case = str(snake_case__ ) if issubclass(snake_case__ , snake_case__ ): __snake_case = filename elif issubclass(snake_case__ , snake_case__ ): __snake_case = [filename] elif issubclass(snake_case__ , snake_case__ ): __snake_case = {'train': filename} __snake_case = 'dummy' __snake_case = xz_file.parent __snake_case = 'extracted' __snake_case = DownloadConfig( cache_dir=snake_case__ , use_etag=snake_case__ , ) __snake_case = DownloadManager(dataset_name=snake_case__ , download_config=snake_case__ ) __snake_case = dl_manager.extract(snake_case__ ) __snake_case = paths for extracted_paths in [extracted_paths]: if isinstance(snake_case__ , snake_case__ ): __snake_case = [extracted_paths] __snake_case = [paths] elif isinstance(snake_case__ , snake_case__ ): assert "train" in extracted_paths.keys() __snake_case = extracted_paths.values() __snake_case = 
paths.values() assert extracted_paths for extracted_path, input_path in zip(snake_case__ , snake_case__ ): assert extracted_path == dl_manager.extracted_paths[input_path] __snake_case = Path(snake_case__ ) __snake_case = extracted_path.parts assert parts[-1] == hash_url_to_filename(snake_case__ , etag=snake_case__ ) assert parts[-2] == extracted_subdir assert extracted_path.exists() __snake_case = extracted_path.read_text() __snake_case = text_file.read_text() assert extracted_file_content == expected_file_content def A ( snake_case__ : Dict , snake_case__ : Optional[int] ) -> Optional[Any]: '''simple docstring''' assert path.endswith('.jsonl' ) for num_items, line in enumerate(snake_case__ , start=1 ): __snake_case = json.loads(line.decode('utf-8' ) ) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] ) def A ( snake_case__ : Any , snake_case__ : List[Any] ) -> Tuple: '''simple docstring''' __snake_case = request.getfixturevalue(snake_case__ ) __snake_case = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ): _test_jsonl(snake_case__ , snake_case__ ) assert num_jsonl == 2 @pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] ) def A ( snake_case__ : List[str] , snake_case__ : Optional[Any] ) -> int: '''simple docstring''' __snake_case = request.getfixturevalue(snake_case__ ) __snake_case = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(snake_case__ ) , start=1 ): _test_jsonl(snake_case__ , snake_case__ ) assert num_tar == 1 assert num_jsonl == 2 def A ( snake_case__ : List[Any] ) -> int: '''simple docstring''' __snake_case = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(snake_case__ ) , start=1 ): assert os.path.basename(snake_case__ ) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
676
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

UpperCAmelCase__ : int = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ : Tuple = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
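The _LazyModule wiring above defers heavy imports until a name is first touched. A toy, self-contained version of that pattern for illustration; this is a sketch of the idea, not the real transformers implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    """Toy lazy-import module: each public name is resolved on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module_name for module_name, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        if name in self._attr_to_module:
            module = importlib.import_module(self._attr_to_module[name])
            value = getattr(module, name)
            setattr(self, name, value)  # cache, so the real import runs only once
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")

lazy = LazyModule("lazy_stdlib", {"json": ["dumps", "loads"]})
print(lazy.dumps({"a": 1}))  # the json module is imported here, on first use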
676
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"] UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Any = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : int = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
from __future__ import annotations


class __lowercase :
    def __init__( self , lowercase_) -> None:
        __snake_case = data
        __snake_case = None
        __snake_case = None


def A ( snake_case__ : Node | None ) -> None:  # In Order traversal of the tree
    '''simple docstring'''
    if tree:
        display(tree.left )
        print(tree.data )
        display(tree.right )


def A ( snake_case__ : Node | None ) -> int:
    '''simple docstring'''
    return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0


def A ( snake_case__ : Node ) -> bool:
    '''simple docstring'''
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
    else:
        return not tree.left and not tree.right


def A ( ) -> None:  # Main function for testing.
    '''simple docstring'''
    __snake_case = Node(1 )
    __snake_case = Node(2 )
    __snake_case = Node(3 )
    __snake_case = Node(4 )
    __snake_case = Node(5 )
    __snake_case = Node(6 )
    __snake_case = Node(7 )
    __snake_case = Node(8 )
    __snake_case = Node(9 )
    print(is_full_binary_tree(snake_case__ ) )
    print(depth_of_tree(snake_case__ ) )
    print('Tree is: ' )
    display(snake_case__ )


if __name__ == "__main__":
    main()
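A hedged, de-obfuscated sketch of the full-binary-tree check above. The readable names and the tree shape are mine, chosen for illustration; the predicate is the same "every node has zero or two children" rule:

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

def is_full(node):
    # Empty subtrees are full; a node with exactly one child breaks fullness.
    if node is None:
        return True
    if (node.left is None) != (node.right is None):
        return False
    return is_full(node.left) and is_full(node.right)

root = Node(1)
root.left, root.right = Node(2), Node(3)
assert is_full(root)
root.left.left = Node(4)  # one child only -> no longer full
assert not is_full(root)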
676
1
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process UpperCAmelCase__ : List[Any] = logging.getLogger(__name__) @dataclass class __lowercase : __UpperCAmelCase = field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __UpperCAmelCase = field( default='''NER''' , metadata={'''help''': '''Task type to fine tune in training (e.g. NER, POS, etc)'''} ) __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) __UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class __lowercase : __UpperCAmelCase = field( metadata={'''help''': '''The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'''} ) __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'''} , ) __UpperCAmelCase = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def A ( ) -> List[str]: '''simple docstring''' # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" ' --overwrite_output_dir to overcome.' 
) __snake_case = import_module('tasks' ) try: __snake_case = getattr(snake_case__ , model_args.task_type ) __snake_case = token_classification_task_clazz() except AttributeError: raise ValueError( f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. " f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , snake_case__ ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __snake_case = token_classification_task.get_labels(data_args.labels ) __snake_case = dict(enumerate(snake_case__ ) ) __snake_case = len(snake_case__ ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=snake_case__ , idalabel=snake_case__ , labelaid={label: i for i, label in enumerate(snake_case__ )} , cache_dir=model_args.cache_dir , ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __snake_case = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , ) # Get datasets __snake_case = ( TokenClassificationDataset( token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __snake_case = ( TokenClassificationDataset( token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(snake_case__ : np.ndarray , snake_case__ : np.ndarray ) -> Tuple[List[int], List[int]]: __snake_case = np.argmax(snake_case__ , axis=2 ) __snake_case , __snake_case = preds.shape __snake_case = [[] for _ in range(snake_case__ )] __snake_case = [[] for _ in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(snake_case__ : 
EvalPrediction ) -> Dict: __snake_case , __snake_case = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(snake_case__ , snake_case__ ), "precision": precision_score(snake_case__ , snake_case__ ), "recall": recall_score(snake_case__ , snake_case__ ), "f1": fa_score(snake_case__ , snake_case__ ), } # Data collator __snake_case = DataCollatorWithPadding(snake_case__ , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __snake_case = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , data_collator=snake_case__ , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __snake_case = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case = trainer.evaluate() __snake_case = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(snake_case__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , snake_case__ , snake_case__ ) writer.write('%s = %s\n' % (key, value) ) results.update(snake_case__ ) # Predict if training_args.do_predict: __snake_case = TokenClassificationDataset( token_classification_task=snake_case__ , data_dir=data_args.data_dir , tokenizer=snake_case__ , labels=snake_case__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __snake_case , __snake_case , __snake_case = trainer.predict(snake_case__ ) __snake_case , __snake_case = align_predictions(snake_case__ , snake_case__ ) __snake_case = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(snake_case__ , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , snake_case__ , snake_case__ ) writer.write('%s = %s\n' % (key, value) ) # Save predictions __snake_case = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(snake_case__ , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(snake_case__ , snake_case__ , snake_case__ ) return results def A ( snake_case__ : Any ) -> int: '''simple docstring''' # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
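A toy numpy illustration of the align_predictions step above: take the argmax over the logits, then drop positions whose label id equals nn.CrossEntropyLoss's ignore_index (-100). The tensors below are made up:

import numpy as np

IGNORE_INDEX = -100  # nn.CrossEntropyLoss().ignore_index
label_map = {0: "O", 1: "B-PER"}

logits = np.array([[[2.0, 0.1], [0.2, 3.0], [1.0, 0.0]]])  # (batch=1, seq=3, labels=2)
label_ids = np.array([[0, 1, IGNORE_INDEX]])               # last token is padding

preds = np.argmax(logits, axis=2)
preds_list = [
    [label_map[p] for p, l in zip(pred_row, label_row) if l != IGNORE_INDEX]
    for pred_row, label_row in zip(preds, label_ids)
]
print(preds_list)  # [['O', 'B-PER']]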
676
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : int = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''table-transformer''' __UpperCAmelCase = ['''past_key_values'''] __UpperCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') __snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(lowercase_ , lowercase_): __snake_case = backbone_config.get('model_type') __snake_case = CONFIG_MAPPING[backbone_model_type] __snake_case = config_class.from_dict(lowercase_) # set timm attributes to None __snake_case , __snake_case , __snake_case = None, None, None __snake_case = use_timm_backbone __snake_case = backbone_config __snake_case = num_channels __snake_case = num_queries __snake_case = d_model __snake_case = encoder_ffn_dim __snake_case = encoder_layers __snake_case = encoder_attention_heads __snake_case = decoder_ffn_dim __snake_case = decoder_layers __snake_case = decoder_attention_heads __snake_case = dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = activation_function __snake_case = init_std __snake_case = init_xavier_std __snake_case = encoder_layerdrop __snake_case = decoder_layerdrop __snake_case = encoder_layers __snake_case = auxiliary_loss __snake_case = position_embedding_type __snake_case = backbone __snake_case = use_pretrained_backbone __snake_case = dilation # Hungarian matcher __snake_case = class_cost __snake_case = bbox_cost __snake_case = giou_cost # Loss coefficients __snake_case = mask_loss_coefficient __snake_case = dice_loss_coefficient __snake_case = bbox_loss_coefficient __snake_case = giou_loss_coefficient __snake_case = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_) @property def _a ( self) -> int: return self.encoder_attention_heads @property def _a ( self) -> int: return self.d_model class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = version.parse('''1.11''' ) @property def _a ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def _a ( self) -> float: return 1e-5 
    @property
    def _a ( self) -> int:
        return 1_2
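A toy version of the attribute_map indirection the config above relies on: "hidden_size" is an alias that resolves to the config's real "d_model" field. Class and field names here are illustrative, not the transformers API:

class ToyConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self, d_model=256):
        self.d_model = d_model

    def __getattr__(self, name):
        # Only reached when normal lookup fails, so no recursion on d_model.
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = ToyConfig()
print(cfg.hidden_size)  # 256, served from cfg.d_model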
676
1
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) @dataclass class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self , **lowercase_) -> str: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __snake_case = deprecated_arg[3:] __snake_case = not kwargs.pop(lowercase_) logger.warning( F"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or" F" {positive_arg}={kwargs[positive_arg]}") __snake_case = kwargs.pop('tpu_name' , self.tpu_name) __snake_case = kwargs.pop('device_idx' , self.device_idx) __snake_case = kwargs.pop('eager_mode' , self.eager_mode) __snake_case = kwargs.pop('use_xla' , self.use_xla) super().__init__(**lowercase_) __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={'''help''': '''Name of TPU'''} , ) __UpperCAmelCase = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) __UpperCAmelCase = field(default=lowerCamelCase__ , metadata={'''help''': '''Benchmark models in eager model.'''} ) __UpperCAmelCase = field( default=lowerCamelCase__ , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def _a ( self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['tf']) __snake_case = None if self.tpu: try: if self.tpu_name: __snake_case = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name) else: __snake_case = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __snake_case = None return tpu @cached_property def _a ( self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['tf']) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu) __snake_case = tf.distribute.TPUStrategy(self._setup_tpu) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU') __snake_case = tf.distribute.OneDeviceStrategy(device=F"/gpu:{self.device_idx}") else: tf.config.set_visible_devices([] , 'GPU') # disable GPU __snake_case = tf.distribute.OneDeviceStrategy(device=F"/cpu:{self.device_idx}") return strategy @property def _a ( self) -> bool: requires_backends(self , ['tf']) return self._setup_tpu is not None @property def _a ( self) -> "tf.distribute.Strategy": requires_backends(self , ['tf']) return self._setup_strategy @property def _a ( self) -> Tuple: requires_backends(self , ['tf']) return tf.config.list_physical_devices('GPU') @property def _a ( self) -> int: requires_backends(self , ['tf']) if self.cuda: return len(self.gpu_list) return 0 @property def _a ( self) -> bool: return self.n_gpu > 0
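A minimal sketch of the deprecated-argument handling in __init__ above: each legacy "no_*" kwarg is flipped into its positive counterpart by stripping the prefix and negating the value. The helper name is mine:

deprecated_args = ["no_inference", "no_cuda", "no_tpu"]

def resolve_kwargs(kwargs):
    resolved = {}
    for deprecated_arg in deprecated_args:
        if deprecated_arg in kwargs:
            positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
            resolved[positive_arg] = not kwargs.pop(deprecated_arg)
    resolved.update(kwargs)
    return resolved

print(resolve_kwargs({"no_cuda": True, "eager_mode": False}))
# {'cuda': False, 'eager_mode': False}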
676
from maths.prime_check import is_prime


def A ( number : int ) -> int:
    '''simple docstring'''
    if not isinstance(number , int ):
        __snake_case = f"Input value of [number={number}] must be an integer"
        raise TypeError(__snake_case )
    if is_prime(number ) and is_prime(number + 2 ):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
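A self-contained usage sketch of the twin-prime logic above, with a local primality test so it runs without the maths.prime_check dependency. Function names here are illustrative:

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n ** 0.5) + 1))

def twin_prime(number: int) -> int:
    # Same rule as A() above: number + 2 when both are prime, else -1.
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1

assert twin_prime(3) == 5    # (3, 5) are twin primes
assert twin_prime(4) == -1   # 4 is not prime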
676
1
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowercase : def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=1_0 , lowercase_=3 , lowercase_=2 , lowercase_=2 , lowercase_=True , lowercase_=True , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0 , lowercase_=0.02 , lowercase_="divided_space_time" , lowercase_=None , ) -> List[Any]: __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = patch_size __snake_case = num_frames __snake_case = is_training __snake_case = use_labels __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = attention_type __snake_case = initializer_range __snake_case = scope __snake_case = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token __snake_case = (image_size // patch_size) ** 2 __snake_case = (num_frames) * self.num_patches_per_frame + 1 def _a ( self) -> Any: __snake_case = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels) __snake_case = self.get_config() return config, pixel_values, labels def _a ( self) -> Dict: __snake_case = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) __snake_case = self.num_labels return config def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Optional[int]: __snake_case = TimesformerModel(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_) -> List[str]: __snake_case = TimesformerForVideoClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_) # verify the logits shape __snake_case = 
torch.Size((self.batch_size, self.num_labels)) self.parent.assertEqual(result.logits.shape , lowercase_) def _a ( self) -> List[Any]: __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification} if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def _a ( self) -> str: __snake_case = TimesformerModelTester(self) __snake_case = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7) def _a ( self , lowercase_ , lowercase_ , lowercase_=False) -> Any: __snake_case = copy.deepcopy(lowercase_) if return_labels: if model_class in get_values(lowercase_): __snake_case = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_) return inputs_dict def _a ( self) -> Dict: self.config_tester.run_common_tests() @unittest.skip(reason='TimeSformer does not use inputs_embeds') def _a ( self) -> Dict: pass def _a ( self) -> Union[str, Any]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(lowercase_) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) __snake_case = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear)) def _a ( self) -> List[str]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(lowercase_) __snake_case = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase_) def _a ( self) -> Tuple: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> int: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_) @slow def _a ( self) -> Union[str, Any]: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = TimesformerModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _a ( self) -> Optional[Any]: if not self.has_attentions: pass else: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = True for model_class in self.all_model_classes: __snake_case = self.model_tester.seq_length __snake_case = self.model_tester.num_frames __snake_case = True __snake_case = False __snake_case = True __snake_case = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_)) __snake_case = outputs.attentions self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] __snake_case = True __snake_case = model_class(lowercase_) model.to(lowercase_) model.eval() 
with torch.no_grad(): __snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_)) __snake_case = outputs.attentions self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) __snake_case = len(lowercase_) # Check attention is always last and order is fine __snake_case = True __snake_case = True __snake_case = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_)) self.assertEqual(out_len + 1 , len(lowercase_)) __snake_case = outputs.attentions self.assertEqual(len(lowercase_) , self.model_tester.num_hidden_layers) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def _a ( self) -> List[str]: def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_): __snake_case = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_)) __snake_case = outputs.hidden_states __snake_case = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowercase_) , lowercase_) __snake_case = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) def A ( ) -> str: '''simple docstring''' __snake_case = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) __snake_case = np.load(snake_case__ ) return list(snake_case__ ) @require_torch @require_vision class __lowercase ( unittest.TestCase ): @cached_property def _a ( self) -> Optional[int]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5]) if is_vision_available() else None ) @slow def _a ( self) -> Any: __snake_case = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400').to( lowercase_) __snake_case = self.default_image_processor __snake_case = prepare_video() __snake_case = image_processor(video[:8] , return_tensors='pt').to(lowercase_) # forward pass with torch.no_grad(): __snake_case = model(**lowercase_) # verify the logits __snake_case = torch.Size((1, 4_0_0)) self.assertEqual(outputs.logits.shape , lowercase_) __snake_case = torch.tensor([-0.3016, -0.7713, -0.4205]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
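A quick arithmetic check of the token bookkeeping the tester above relies on, using its default sizes:

image_size, patch_size, num_frames = 10, 2, 2        # the tester defaults above
num_patches_per_frame = (image_size // patch_size) ** 2
seq_length = num_frames * num_patches_per_frame + 1  # +1 for the CLS token
print(num_patches_per_frame, seq_length)             # 25 51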
676
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] )
@pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] )
@pytest.mark.parametrize('revision' , [None, 'v2'] )
def A ( repo_id , path , revision ):
    '''simple docstring'''
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}"
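A stdlib-only sketch mirroring the URL shape the test asserts; hf_hub_url is the canonical helper, this just re-derives the same f-string:

from urllib.parse import quote

def make_url(repo_id, path, revision=None):
    return f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"

print(make_url("org-name/dataset-name", "filename with blanks.csv"))
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv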
676
1
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class __lowercase ( lowerCamelCase__ ):
    __UpperCAmelCase = 0
    __UpperCAmelCase = False
    __UpperCAmelCase = 3.0


class __lowercase ( unittest.TestCase ):
    def _a ( self) -> List[str]:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs() , {})
        self.assertDictEqual(MockClass(a=2).to_kwargs() , {'a': 2})
        self.assertDictEqual(MockClass(a=2 , b=lowercase_).to_kwargs() , {'a': 2, 'b': True})
        self.assertDictEqual(MockClass(a=2 , c=2.25).to_kwargs() , {'a': 2, 'c': 2.25})

    @require_cuda
    def _a ( self) -> List[Any]:
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        __snake_case = GradScalerKwargs(init_scale=1_0_2_4 , growth_factor=2)
        AcceleratorState._reset_state()
        __snake_case = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler])
        print(accelerator.use_fpaa)
        __snake_case = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale , 1024.0)
        self.assertEqual(scaler._growth_factor , 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor , 0.5)
        self.assertEqual(scaler._growth_interval , 2_0_0_0)
        self.assertEqual(scaler._enabled , lowercase_)

    @require_multi_gpu
    def _a ( self) -> str:
        __snake_case = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(lowercase_ , env=os.environ.copy())


if __name__ == "__main__":
    UpperCAmelCase__ : Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    UpperCAmelCase__ : Tuple = Accelerator(kwargs_handlers=[ddp_scaler])
    UpperCAmelCase__ : int = torch.nn.Linear(1_00, 2_00)
    UpperCAmelCase__ : Optional[Any] = accelerator.prepare(model)
    # Check the values changed in kwargs
    UpperCAmelCase__ : Tuple = ""
    UpperCAmelCase__ : Any = model.bucket_bytes_cap // (10_24 * 10_24)
    if observed_bucket_cap_map != 15:
        error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
    # Check the values of the defaults
    if model.dim != 0:
        error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
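A toy dataclass showing the to_kwargs behaviour the first test checks: only fields that differ from their declared defaults are returned. This is a sketch of the idea, not accelerate's KwargsHandler:

from dataclasses import dataclass, fields

@dataclass
class MockClass:
    a: int = 0
    b: bool = False
    c: float = 3.0

    def to_kwargs(self):
        # Keep only the fields whose current value differs from the default.
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != f.default
        }

assert MockClass().to_kwargs() == {}
assert MockClass(a=2, b=True).to_kwargs() == {"a": 2, "b": True}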
676
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCAmelCase__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def A ( snake_case__ : List[Any] ) -> str: '''simple docstring''' for pegasus_name, hf_name in PATTERNS: __snake_case = k.replace(snake_case__ , snake_case__ ) return k def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration: '''simple docstring''' __snake_case = DEFAULTS.copy() cfg_kwargs.update(snake_case__ ) __snake_case = PegasusConfig(**snake_case__ ) __snake_case = PegasusForConditionalGeneration(snake_case__ ) __snake_case = torch_model.model.state_dict() __snake_case = {} for k, v in tf_weights.items(): __snake_case = rename_state_dict_key(snake_case__ ) if new_k not in sd: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if "dense" in k or "proj" in new_k: __snake_case = v.T __snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}" # make sure embedding.padding_idx is respected __snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) __snake_case = mapping['shared.weight'] __snake_case = mapping['shared.weight'] __snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**snake_case__ ) __snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ ) __snake_case = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict: '''simple docstring''' __snake_case = tf.train.list_variables(snake_case__ ) __snake_case = {} __snake_case = ['Adafactor', 'global_step'] for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ): __snake_case = any(pat in name for pat in ignore_name ) if skip_key: continue __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) __snake_case = array return tf_weights def A ( snake_case__ : str , snake_case__ : str ) -> Tuple: '''simple docstring''' # save tokenizer first __snake_case = Path(snake_case__ ).parent.name __snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings'] __snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(snake_case__ ) # convert model 
__snake_case = get_tf_weights_as_numpy(snake_case__ ) __snake_case = task_specific_params[f"summarization_{dataset}"] if dataset == "large": __snake_case = task_specific_params __snake_case = convert_pegasus(snake_case__ , snake_case__ ) torch_model.save_pretrained(snake_case__ ) __snake_case = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' ) if __name__ == "__main__": UpperCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ : int = parser.parse_args() if args.save_dir is None: UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name UpperCAmelCase__ : str = os.path.join("pegasus", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
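A stand-alone sketch of the PATTERNS-driven key renaming used in the conversion above, with only a few of the patterns shown. Note that "memory_attention" must appear before the bare "attention" pattern, since replacements run in list order:

PATTERNS = [
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
]

def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k

print(rename_state_dict_key("decoder/memory_attention/kernel"))
# decoder.encoder_attn.kernel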
676
1
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def A ( ) -> Tuple:
    '''simple docstring'''
    __snake_case = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    __snake_case = Dataset.from_dict(snake_case__ )
    return dataset


class __lowercase ( lowerCamelCase__ ):
    def _a ( self) -> List[str]:
        __snake_case = get_dataset()
        __snake_case = make_duplicate_clusters(lowercase_ , 0.85)
        self.assertEqual(len(duplicate_clusters[0]) , 2)

    def _a ( self) -> List[str]:
        __snake_case = get_dataset()
        __snake_case , __snake_case = deduplicate_dataset(lowercase_)
        self.assertEqual(len(lowercase_) , 2)
        print(lowercase_)
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , lowercase_)
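The clusters above come from MinHash, which approximates Jaccard similarity over token sets. An exact-Jaccard sketch of why the fixture's two "a"-repetition files clear the 0.85 threshold while the "b" file does not:

def jaccard(a: str, b: str) -> float:
    sa, sb = set(a.split()), set(b.split())
    return len(sa & sb) / len(sa | sb)

print(jaccard("a " * 20, "a " * 30))  # 1.0 -> clustered as duplicates
print(jaccard("a " * 20, "b " * 7))   # 0.0 -> kept as distinct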
676
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]: super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , ) def _a ( self , lowercase_ = "auto") -> Union[str, Any]: if slice_size == "auto": __snake_case = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_) def _a ( self) -> Any: self.enable_attention_slicing(lowercase_) @torch.no_grad() def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]: __snake_case = self.speech_processor.feature_extractor( lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device) __snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0) __snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[ 0 ] if isinstance(lowercase_ , lowercase_): __snake_case = 1 elif isinstance(lowercase_ , lowercase_): __snake_case = len(lowercase_) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(lowercase_)}.") # get prompt text embeddings __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) __snake_case = text_inputs.input_ids if text_input_ids.shape[-1] > 
self.tokenizer.model_max_length: __snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F" {self.tokenizer.model_max_length} tokens: {removed_text}") __snake_case = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case = text_embeddings.shape __snake_case = text_embeddings.repeat(1 , lowercase_ , 1) __snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __snake_case = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case = 42 if negative_prompt is None: __snake_case = [''] * batch_size elif type(lowercase_) is not type(lowercase_): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !=" F" {type(lowercase_)}.") elif isinstance(lowercase_ , lowercase_): __snake_case = [negative_prompt] elif batch_size != len(lowercase_): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.') else: __snake_case = negative_prompt __snake_case = text_input_ids.shape[-1] __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , ) __snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case = uncond_embeddings.shape[1] __snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1) __snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to( self.device) else: __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") __snake_case = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(lowercase_) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler __snake_case = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) __snake_case = {} if accepts_eta: __snake_case = eta for i, t in enumerate(self.progress_bar(lowercase_)): # expand the latents if we are doing classifier free guidance __snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_) # predict the noise residual __snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case = noise_pred.chunk(2) __snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_) __snake_case = 1 / 0.1_8215 * latents __snake_case = self.vae.decode(lowercase_).sample __snake_case = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __snake_case = self.numpy_to_pil(lowercase_) if not return_dict: return image return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
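A minimal numeric sketch of the classifier-free guidance update in the denoising loop above; numpy stands in for torch and the values are made up:

import numpy as np

guidance_scale = 7.5
noise_pred_uncond = np.array([0.1, -0.2])
noise_pred_text = np.array([0.3, 0.1])

# Start from the unconditional estimate and push it toward the
# text-conditioned one, scaled by the guidance weight.
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # [1.6  2.05]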
676
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: super().__init__(*lowercase_ , **lowercase_) requires_backends(self , 'vision') self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING) def _a ( self , lowercase_=None , lowercase_=None , lowercase_=None) -> Optional[int]: __snake_case = {} __snake_case = {} if prompt is not None: __snake_case = prompt if generate_kwargs is not None: __snake_case = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: __snake_case = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,' ' please use only one') __snake_case = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self , lowercase_ , **lowercase_) -> Tuple: return super().__call__(lowercase_ , **lowercase_) def _a ( self , lowercase_ , lowercase_=None) -> Tuple: __snake_case = load_image(lowercase_) if prompt is not None: if not isinstance(lowercase_ , lowercase_): raise ValueError( F"Received an invalid text input, got - {type(lowercase_)} - but expected a single string. " 'Note also that one single text can be provided for conditional image to text generation.') __snake_case = self.model.config.model_type if model_type == "git": __snake_case = self.image_processor(images=lowercase_ , return_tensors=self.framework) __snake_case = self.tokenizer(text=lowercase_ , add_special_tokens=lowercase_).input_ids __snake_case = [self.tokenizer.cls_token_id] + input_ids __snake_case = torch.tensor(lowercase_).unsqueeze(0) model_inputs.update({'input_ids': input_ids}) elif model_type == "pix2struct": __snake_case = self.image_processor(images=lowercase_ , header_text=lowercase_ , return_tensors=self.framework) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation __snake_case = self.image_processor(images=lowercase_ , return_tensors=self.framework) __snake_case = self.tokenizer(lowercase_ , return_tensors=self.framework) model_inputs.update(lowercase_) else: raise ValueError(F"Model type {model_type} does not support conditional text generation") else: __snake_case = self.image_processor(images=lowercase_ , return_tensors=self.framework) if self.model.config.model_type == "git" and prompt is None: __snake_case = None return model_inputs def _a ( self , lowercase_ , lowercase_=None) -> Optional[Any]: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. 
if ( "input_ids" in model_inputs and isinstance(model_inputs['input_ids'] , lowercase_) and all(x is None for x in model_inputs['input_ids']) ): __snake_case = None if generate_kwargs is None: __snake_case = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. __snake_case = model_inputs.pop(self.model.main_input_name) __snake_case = self.model.generate(lowercase_ , **lowercase_ , **lowercase_) return model_outputs def _a ( self , lowercase_) -> Tuple: __snake_case = [] for output_ids in model_outputs: __snake_case = { 'generated_text': self.tokenizer.decode( lowercase_ , skip_special_tokens=lowercase_ , ) } records.append(lowercase_) return records
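A schematic of the preprocess / _forward / postprocess contract the pipeline class above implements. Names and behaviour here are illustrative, not the transformers API:

class ToyPipeline:
    def preprocess(self, inputs):
        return {"model_inputs": inputs.lower()}

    def _forward(self, model_inputs):
        return {"output": model_inputs["model_inputs"][::-1]}

    def postprocess(self, model_outputs):
        return [{"generated_text": model_outputs["output"]}]

    def __call__(self, inputs):
        # The real base Pipeline also threads per-stage kwargs through each step.
        return self.postprocess(self._forward(self.preprocess(inputs)))

print(ToyPipeline()("ABC"))  # [{'generated_text': 'cba'}]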
676
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple: super().__init__(*lowercase_ , **lowercase_) __snake_case = eval_examples __snake_case = post_process_function def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]: __snake_case = gen_kwargs.copy() __snake_case = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) __snake_case = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) __snake_case = gen_kwargs __snake_case = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case = self.get_eval_dataloader(lowercase_) __snake_case = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_) __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) else: __snake_case = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowercase_) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) __snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_) return metrics def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]: __snake_case = gen_kwargs.copy() __snake_case = self.get_test_dataloader(lowercase_) # Temporarily disable metric computation, we will do it in the loop here. 
__snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict') __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
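# The evaluate/predict overrides above resolve generation settings from `gen_kwargs`, falling
# back to `args.generation_max_length` / `args.generation_num_beams` when a key is absent.
# A standalone sketch of that fallback logic (function name and default values are
# illustrative, not taken from this file):
def resolve_gen_kwargs(gen_kwargs, default_max_length=128, default_num_beams=4):
    gen_kwargs = gen_kwargs.copy()
    if gen_kwargs.get('max_length') is None:
        gen_kwargs['max_length'] = default_max_length
    if gen_kwargs.get('num_beams') is None:
        gen_kwargs['num_beams'] = default_num_beams
    return gen_kwargs

print(resolve_gen_kwargs({'num_beams': 8}))  # {'num_beams': 8, 'max_length': 128}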
import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase__ : Dict = { "vocab_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json", }, "merges_file": { "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt", }, "tokenizer_file": { "Salesforce/codegen-350M-mono": ( "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json" ), }, } UpperCAmelCase__ : str = { "Salesforce/codegen-350M-mono": 20_48, } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = ['''input_ids''', '''attention_mask'''] __UpperCAmelCase = CodeGenTokenizer def __init__( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_="<|endoftext|>" , lowercase_=False , **lowercase_ , ) -> Tuple: super().__init__( lowercase_ , lowercase_ , tokenizer_file=lowercase_ , unk_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , add_prefix_space=lowercase_ , **lowercase_ , ) if kwargs.pop('add_bos_token' , lowercase_): __snake_case = kwargs.pop('name_or_path' , '') raise ValueError( 'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. ' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n" F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.') __snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , lowercase_) != add_prefix_space: __snake_case = getattr(lowercase_ , pre_tok_state.pop('type')) __snake_case = add_prefix_space __snake_case = pre_tok_class(**lowercase_) __snake_case = add_prefix_space def _a ( self , *lowercase_ , **lowercase_) -> BatchEncoding: __snake_case = kwargs.get('is_split_into_words' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowercase_ , **lowercase_) def _a ( self , *lowercase_ , **lowercase_) -> BatchEncoding: __snake_case = kwargs.get('is_split_into_words' , lowercase_) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*lowercase_ , **lowercase_) def _a ( self , lowercase_ , lowercase_ = None) -> Tuple[str]: __snake_case = self._tokenizer.model.save(lowercase_ , name=lowercase_) return tuple(lowercase_) def _a ( self , lowercase_ , lowercase_ = False , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> str: __snake_case = super().decode( token_ids=lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , **lowercase_ , ) if truncate_before_pattern is not None and len(lowercase_) > 0: __snake_case = self.truncate(lowercase_ , lowercase_) return decoded_text def _a ( self , lowercase_ , lowercase_) -> str: def find_re(lowercase_ , lowercase_ , lowercase_): __snake_case = pattern.search(lowercase_ , lowercase_) return m.start() if m else -1 __snake_case = [re.compile(lowercase_ , re.MULTILINE) for pattern in truncate_before_pattern] __snake_case = list(re.finditer('^print' , lowercase_ , re.MULTILINE)) if len(lowercase_) > 1: __snake_case = completion[: prints[1].start()] __snake_case = list(re.finditer('^def' , lowercase_ , re.MULTILINE)) if len(lowercase_) > 1: __snake_case = completion[: defs[1].start()] __snake_case = 0 __snake_case = [ pos for pos in [find_re(lowercase_ , lowercase_ , lowercase_) for terminal in terminals] if pos != -1 ] if len(lowercase_) > 0: return completion[: min(lowercase_)] else: return completion
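# A hedged example of the `truncate_before_pattern` behaviour implemented above: `decode`
# cuts the text at the first regex match found by `truncate` (which also stops before a
# second top-level `print` or `def`). Downloads the Salesforce/codegen-350M-mono tokenizer
# named in this file; `CodeGenTokenizerFast` is the upstream name of the obfuscated class.
from transformers import CodeGenTokenizerFast

tok = CodeGenTokenizerFast.from_pretrained('Salesforce/codegen-350M-mono')
ids = tok.encode("def hello():\n    print('hi')\n\n\nprint('trailing')")
print(tok.decode(ids, truncate_before_pattern=['\n\n\n']))  # stops at the blank lines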
from __future__ import annotations UpperCAmelCase__ : Dict = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]: '''simple docstring''' __snake_case = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the reference grid __snake_case = 1 __snake_case = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the action grid __snake_case = init[0] __snake_case = init[1] __snake_case = 0 __snake_case = g + heuristic[x][y] # cost from starting cell to destination cell __snake_case = [[f, g, x, y]] __snake_case = False # flag that is set when search is complete __snake_case = False # flag set if we can't expand while not found and not resign: if len(snake_case__ ) == 0: raise ValueError('Algorithm is unable to find solution' ) else: # to choose the least costly action so as to move closer to the goal cell.sort() cell.reverse() __snake_case = cell.pop() __snake_case = next_cell[2] __snake_case = next_cell[3] __snake_case = next_cell[1] if x == goal[0] and y == goal[1]: __snake_case = True else: for i in range(len(snake_case__ ) ): # to try out different valid actions __snake_case = x + DIRECTIONS[i][0] __snake_case = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: __snake_case = g + cost __snake_case = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) __snake_case = 1 __snake_case = i __snake_case = [] __snake_case = goal[0] __snake_case = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: __snake_case = x - DIRECTIONS[action[x][y]][0] __snake_case = y - DIRECTIONS[action[x][y]][1] __snake_case = xa __snake_case = ya invpath.append([x, y] ) __snake_case = [] for i in range(len(snake_case__ ) ): path.append(invpath[len(snake_case__ ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCAmelCase__ : str = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0s are free path whereas 1s are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCAmelCase__ : int = [0, 0] # all coordinates are given in format [y,x] UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1] UpperCAmelCase__ : Optional[Any] = 1 # the cost map which pushes the path closer to the goal UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCAmelCase__ : Optional[int] = 99 UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
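# A small sanity check for `search` above (defined as the obfuscated `A`, but called as
# `search` in the __main__ block) on an obstacle-free 2x3 grid, using the same
# Manhattan-distance heuristic the __main__ block builds:
tiny_grid = [[0, 0, 0], [0, 0, 0]]
tiny_goal = [1, 2]
tiny_heuristic = [
    [abs(i - tiny_goal[0]) + abs(j - tiny_goal[1]) for j in range(3)] for i in range(2)
]
tiny_path, _ = search(tiny_grid, [0, 0], tiny_goal, 1, tiny_heuristic)
print(tiny_path)  # starts at [0, 0] and ends at [1, 2]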
from typing import TYPE_CHECKING from ...utils import _LazyModule UpperCAmelCase__ : List[str] = {"tokenization_byt5": ["ByT5Tokenizer"]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys UpperCAmelCase__ : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
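# The `_LazyModule` above defers the heavy import until an attribute is accessed, so
# `import transformers.models.byt5` stays cheap. A hedged illustration (module path per the
# upstream layout; names in this file are obfuscated, e.g. `tokenization_byta`):
import importlib

byta = importlib.import_module('transformers.models.byt5')
print(type(byta).__name__)          # the _LazyModule proxy, until first attribute access
tokenizer_cls = byta.ByT5Tokenizer  # this access triggers the real import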
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase__ : Any = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class __lowercase ( unittest.TestCase ): def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict: __snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))] if identifier is not None: __snake_case = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_): for n_ in n_identifier: __snake_case = [file for file in files if n_ not in file] else: __snake_case = [file for file in files if n_identifier not in file] __snake_case = ignore_files or [] ignore_files.append('__init__.py') __snake_case = [file for file in files if file not in ignore_files] for file in files: # Open all files print('Testing' , lowercase_) if only_modules: __snake_case = file.split('.')[0] try: __snake_case = getattr(lowercase_ , lowercase_) __snake_case = doctest.DocTestSuite(lowercase_) __snake_case = unittest.TextTestRunner().run(lowercase_) self.assertIs(len(result.failures) , 0) except AttributeError: logger.info(F"{module_identifier} is not a module.") else: __snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS) self.assertIs(result.failed , 0) def _a ( self) -> str: __snake_case = Path('src/transformers') __snake_case = 'modeling' __snake_case = [ 'modeling_ctrl.py', 'modeling_tf_ctrl.py', ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_) def _a ( self) -> Optional[Any]: __snake_case = Path('src/transformers') __snake_case = 'tokenization' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> List[str]: __snake_case = Path('src/transformers') __snake_case = 'configuration' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('src/transformers') __snake_case = ['configuration', 'modeling', 'tokenization'] self.analyze_directory(lowercase_ , n_identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('docs/source') __snake_case = ['favicon.ico'] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
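# The harness above drives two doctest entry points: `doctest.DocTestSuite` for importable
# modules and `doctest.testfile` for documentation files. A minimal self-contained sketch of
# the DocTestSuite path using a throwaway module (the module and its docstring are invented
# for illustration):
import doctest, types, unittest

demo = types.ModuleType('demo')
demo.__doc__ = """
>>> 2 + 2
4
"""
suite = doctest.DocTestSuite(demo)
print(unittest.TextTestRunner(verbosity=0).run(suite).wasSuccessful())  # True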
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class __lowercase ( unittest.TestCase ): def __init__( self , lowercase_ , lowercase_=7 , lowercase_=3 , lowercase_=3_0 , lowercase_=4_0_0 , lowercase_=True , lowercase_=None , lowercase_=True , lowercase_=[0.5, 0.5, 0.5] , lowercase_=[0.5, 0.5, 0.5] , lowercase_=True , lowercase_=1 / 2_5_5 , lowercase_=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __snake_case = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} __snake_case = parent __snake_case = batch_size __snake_case = num_channels __snake_case = min_resolution __snake_case = max_resolution __snake_case = do_resize __snake_case = size __snake_case = do_normalize __snake_case = image_mean __snake_case = image_std __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_pad def _a ( self) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _a ( self , lowercase_ , lowercase_=False) -> Any: if not batched: __snake_case = image_inputs[0] if isinstance(lowercase_ , Image.Image): __snake_case , __snake_case = image.size else: __snake_case , __snake_case = image.shape[1], image.shape[2] if w < h: __snake_case = int(self.size['shortest_edge'] * h / w) __snake_case = self.size['shortest_edge'] elif w > h: __snake_case = self.size['shortest_edge'] __snake_case = int(self.size['shortest_edge'] * w / h) else: __snake_case = self.size['shortest_edge'] __snake_case = self.size['shortest_edge'] else: __snake_case = [] for image in image_inputs: __snake_case , __snake_case = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) __snake_case = max(lowercase_ , key=lambda lowercase_: item[0])[0] __snake_case = max(lowercase_ , key=lambda lowercase_: item[1])[1] return expected_height, expected_width @require_torch @require_vision class __lowercase ( lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = ConditionalDetrImageProcessor if is_vision_available() else None def _a ( self) -> Any: __snake_case = ConditionalDetrImageProcessingTester(self) @property def _a ( self) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _a ( self) -> List[Any]: __snake_case = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowercase_ , 'image_mean')) self.assertTrue(hasattr(lowercase_ , 'image_std')) self.assertTrue(hasattr(lowercase_ , 'do_normalize')) self.assertTrue(hasattr(lowercase_ , 'do_resize')) self.assertTrue(hasattr(lowercase_ , 'size')) def _a ( self) -> List[Any]: __snake_case = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}) self.assertEqual(image_processor.do_pad , lowercase_) __snake_case = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , 
pad_and_return_pixel_mask=lowercase_) self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4}) self.assertEqual(image_processor.do_pad , lowercase_) def _a ( self) -> Optional[int]: pass def _a ( self) -> Tuple: # Initialize image_processing __snake_case = self.image_processing_class(**self.image_processor_dict) # create random PIL images __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __snake_case , __snake_case = self.image_processor_tester.get_expected_values(lowercase_) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case , __snake_case = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_) __snake_case = image_processing(lowercase_ , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self) -> List[Any]: # Initialize image_processing __snake_case = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_) for image in image_inputs: self.assertIsInstance(lowercase_ , np.ndarray) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __snake_case , __snake_case = self.image_processor_tester.get_expected_values(lowercase_) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case = image_processing(lowercase_ , return_tensors='pt').pixel_values __snake_case , __snake_case = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _a ( self) -> Optional[Any]: # Initialize image_processing __snake_case = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_) for image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor) # Test not batched input __snake_case = image_processing(image_inputs[0] , return_tensors='pt').pixel_values __snake_case , __snake_case = self.image_processor_tester.get_expected_values(lowercase_) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case = image_processing(lowercase_ , return_tensors='pt').pixel_values __snake_case , __snake_case = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _a ( self) -> Optional[int]: # prepare image and target __snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as 
f: __snake_case = json.loads(f.read()) __snake_case = {'image_id': 3_9_7_6_9, 'annotations': target} # encode them __snake_case = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50') __snake_case = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='pt') # verify pixel values __snake_case = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape , lowercase_) __snake_case = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4)) # verify area __snake_case = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_)) # verify boxes __snake_case = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_) __snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3)) # verify image_id __snake_case = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_)) # verify is_crowd __snake_case = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_)) # verify class_labels __snake_case = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_)) # verify orig_size __snake_case = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_)) # verify size __snake_case = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_)) @slow def _a ( self) -> Tuple: # prepare image, target and masks_path __snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f: __snake_case = json.loads(f.read()) __snake_case = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target} __snake_case = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them __snake_case = ConditionalDetrImageProcessor(format='coco_panoptic') __snake_case = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='pt') # verify pixel values __snake_case = torch.Size([1, 3, 8_0_0, 1_0_6_6]) self.assertEqual(encoding['pixel_values'].shape , lowercase_) __snake_case = torch.tensor([0.2796, 0.3138, 0.3481]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1e-4)) # verify area __snake_case = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_)) # verify boxes __snake_case = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_) __snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1e-3)) # verify image_id __snake_case = torch.tensor([3_9_7_6_9]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_)) # verify is_crowd __snake_case = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_)) # verify class_labels __snake_case = torch.tensor([1_7, 
1_7, 6_3, 7_5, 7_5, 9_3]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_)) # verify masks __snake_case = 8_2_2_8_7_3 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase_) # verify orig_size __snake_case = torch.tensor([4_8_0, 6_4_0]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_)) # verify size __snake_case = torch.tensor([8_0_0, 1_0_6_6]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_))
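# The shape assertions above rely on `get_expected_values`; a standalone restatement of its
# shortest-edge arithmetic for a single 30x40 (h x w) image under the default size dict
# {'shortest_edge': 18, 'longest_edge': 1333} (helper name is illustrative):
def expected_hw(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

print(expected_hw(30, 40))  # (18, 24): height is the shorter side, so it is resized to 18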
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' def count_of_possible_combinations(snake_case__ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case__ ) def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' def count_of_possible_combinations_with_dp_array( snake_case__ : int , snake_case__ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] __snake_case = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case__ ) for item in array ) __snake_case = answer return answer __snake_case = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ ) def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' __snake_case = [0] * (target + 1) __snake_case = 1 for i in range(1 , target + 1 ): for j in range(snake_case__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ : str = 3 UpperCAmelCase__ : Optional[int] = 5 UpperCAmelCase__ : Tuple = [1, 2, 5] print(combination_sum_iv(n, array, target))
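# The three implementations above (plain recursion, memoized recursion, bottom-up DP)
# compute the same count; a quick cross-check on the module's own example. Only the first
# name is attested by the __main__ block; the other two follow the upstream module and are
# obfuscated to `A` here:
assert (
    combination_sum_iv(3, [1, 2, 5], 5)
    == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    == 9
)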
from __future__ import annotations def A ( snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : list[list[str]] , snake_case__ : int , ) -> None: '''simple docstring''' __snake_case = len(snake_case__ ) # If row is equal to the size of the board it means there is a queen in each row of # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(snake_case__ ): # We apply what we learned previously. First we check that the current board # (possible_board) does not already contain the same value, because if it does # there is a vertical collision. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify that the results of these two formulas do not already exist in # their respective variables (diagonal_right_collisions, diagonal_left_collisions). # # If any of these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # Otherwise we call the dfs function again with updated inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , snake_case__ , snake_case__ , ) def A ( snake_case__ : int ) -> None: '''simple docstring''' __snake_case = [] depth_first_search([] , [] , [] , snake_case__ , snake_case__ ) # Print all the boards for board in boards: for column in board: print(snake_case__ ) print('' ) print(len(snake_case__ ) , 'solutions were found.' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
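# The two diagonal-collision formulas used above: squares (r1, c1) and (r2, c2) share a
# 45-degree diagonal iff r - c matches, and a 135-degree diagonal iff r + c matches. A tiny
# standalone check (helper name is illustrative):
def same_diagonal(r1, c1, r2, c2):
    return r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2

print(same_diagonal(0, 0, 2, 2))  # True: both on the r - c == 0 diagonal
print(same_diagonal(0, 3, 3, 0))  # True: both on the r + c == 3 diagonal
print(same_diagonal(0, 0, 1, 2))  # False: no shared diagonal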
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> List[str]: __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(3_0).tolist()]}) return dset def _a ( self) -> Optional[int]: import faiss __snake_case = self._create_dummy_dataset() __snake_case = dset.map( lambda lowercase_ , i: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_) __snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') dset.drop_index('vecs') def _a ( self) -> str: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> int: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name) dset.load_faiss_index('vecs2' , tmp_file.name) os.unlink(tmp_file.name) __snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> List[Any]: __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs') dset.drop_index('vecs') self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa))) def _a ( self) -> Any: from elasticsearch import Elasticsearch __snake_case = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 3_0) __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}} __snake_case = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=lowercase_) __snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29') self.assertEqual(examples['filename'][0] , 'my_name-train_29') @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[int]: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal , 5) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa)) self.assertEqual(index.faiss_index.ntotal , 1_0) # single query __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1)) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) # batched queries __snake_case = np.eye(5 , dtype=np.floataa)[::-1] __snake_case , __snake_case = index.search_batch(lowercase_) self.assertRaises(lowercase_ , index.search_batch , queries[0]) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([4, 3, 2, 1, 0] , lowercase_) def _a ( self) -> str: import faiss __snake_case = FaissIndex(string_factory='Flat') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) __snake_case = FaissIndex(string_factory='LSH') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexLSH) with self.assertRaises(lowercase_): __snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5)) def _a ( self) -> Optional[int]: import faiss __snake_case = faiss.IndexFlat(5) __snake_case = FaissIndex(custom_index=lowercase_) index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) def _a ( self) -> Tuple: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: index.save(tmp_file.name) __snake_case = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) @require_faiss def A ( snake_case__ : List[str] ) -> List[Any]: '''simple docstring''' import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __snake_case = 'index.faiss' __snake_case = f"mock://{index_name}" index.save(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = np.zeros(5 , dtype=np.floataa ) __snake_case = 1 __snake_case , __snake_case = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[Any]: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = Elasticsearch() __snake_case = {'acknowledged': True} __snake_case = ElasticSearchIndex(es_client=lowercase_) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(['foo', 'bar', 'foobar']) # single query __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # single query with timeout __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # batched queries __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_) # batched queries with timeout __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_)
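# A minimal standalone FAISS inner-product search, mirroring what the tests above exercise
# through the datasets `FaissIndex` wrapper (requires the `faiss` package):
import faiss
import numpy as np

index = faiss.IndexFlatIP(5)                # flat inner-product index, dimension 5
index.add(np.eye(5, dtype=np.float32))      # five one-hot vectors
query = np.ones((1, 5), dtype=np.float32)
scores, ids = index.search(query, 1)        # top-1 neighbour for the all-ones query
print(scores[0][0], ids[0][0])              # 1.0, 0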
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType UpperCAmelCase__ : List[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Optional[int] = { "openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": "", } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''imagegpt''' __UpperCAmelCase = ['''past_key_values'''] __UpperCAmelCase = { '''hidden_size''': '''n_embd''', '''max_position_embeddings''': '''n_positions''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , lowercase_=5_1_2 + 1 , lowercase_=3_2 * 3_2 , lowercase_=5_1_2 , lowercase_=2_4 , lowercase_=8 , lowercase_=None , lowercase_="quick_gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1e-5 , lowercase_=0.02 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , **lowercase_ , ) -> Union[str, Any]: __snake_case = vocab_size __snake_case = n_positions __snake_case = n_embd __snake_case = n_layer __snake_case = n_head __snake_case = n_inner __snake_case = activation_function __snake_case = resid_pdrop __snake_case = embd_pdrop __snake_case = attn_pdrop __snake_case = layer_norm_epsilon __snake_case = initializer_range __snake_case = scale_attn_weights __snake_case = use_cache __snake_case = scale_attn_by_inverse_layer_idx __snake_case = reorder_and_upcast_attn __snake_case = tie_word_embeddings super().__init__(tie_word_embeddings=lowercase_ , **lowercase_) class __lowercase ( lowerCamelCase__ ): @property def _a ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ]) def _a ( self , lowercase_ , lowercase_ = 1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , lowercase_ = 3 , lowercase_ = 3_2 , lowercase_ = 3_2 , ) -> Mapping[str, Any]: __snake_case = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_) __snake_case = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_)) return inputs
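# A hedged instantiation of the config above (class names are obfuscated here; upstream this
# is `ImageGPTConfig`). Per the defaults in the __init__ signature, the vocabulary is a
# 512-cluster palette plus a start-of-sequence token, over 32x32 pixel positions:
from transformers import ImageGPTConfig

config = ImageGPTConfig()
print(config.vocab_size, config.n_positions)  # 513 1024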
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]: '''simple docstring''' __snake_case = args.log_outputs __snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric __snake_case = load_metric('wer' ) __snake_case = load_metric('cer' ) # compute metrics __snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] ) __snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results __snake_case = f"WER: {wer_result}\nCER: {cer_result}" print(snake_case__ ) with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f: f.write(snake_case__ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __snake_case = f"log_{dataset_id}_predictions.txt" __snake_case = f"log_{dataset_id}_targets.txt" with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t: # mapping function to write output def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ): p.write(f"{i}" + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f"{i}" + '\n' ) t.write(batch['target'] + '\n' ) result.map(snake_case__ , with_indices=snake_case__ ) def A ( snake_case__ : str ) -> str: '''simple docstring''' __snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __snake_case = re.sub(snake_case__ , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! __snake_case = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: __snake_case = ' '.join(text.split(snake_case__ ) ) return text def A ( snake_case__ : int ) -> Optional[int]: '''simple docstring''' # load dataset __snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __snake_case = AutoFeatureExtractor.from_pretrained(args.model_id ) __snake_case = feature_extractor.sampling_rate # resample audio __snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) ) # load eval pipeline if args.device is None: __snake_case = 0 if torch.cuda.is_available() else -1 __snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case__ : Optional[Any] ): __snake_case = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __snake_case = prediction['text'] __snake_case = normalize_text(batch['sentence'] ) return batch # run inference on all examples __snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case__ , snake_case__ ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) UpperCAmelCase__ : str = parser.parse_args() main(args)
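# Example invocation of the evaluation script above (checkpoint and dataset ids are
# illustrative; any ASR checkpoint and speech dataset loadable with 🤗 Transformers/Datasets
# should work):
#
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 \
#       --config en \
#       --split test \
#       --chunk_length_s 5.0 \
#       --stride_length_s 1.0 \
#       --log_outputs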
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig UpperCAmelCase__ : str = logging.get_logger(__name__) # General docstring UpperCAmelCase__ : Optional[Any] = "RegNetConfig" # Base docstring UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040" UpperCAmelCase__ : int = [1, 10_88, 7, 7] # Image classification docstring UpperCAmelCase__ : str = "facebook/regnet-y-040" UpperCAmelCase__ : Optional[int] = "tabby, tabby cat" UpperCAmelCase__ : Optional[int] = [ "facebook/regnet-y-040", # See all regnet models at https://huggingface.co/models?filter=regnet ] class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ = 3 , lowercase_ = 1 , lowercase_ = 1 , lowercase_ = "relu" , ) -> List[str]: super().__init__() __snake_case = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) __snake_case = nn.BatchNormad(lowercase_) __snake_case = ACTaFN[activation] if activation is not None else nn.Identity() def _a ( self , lowercase_) -> int: __snake_case = self.convolution(lowercase_) __snake_case = self.normalization(lowercase_) __snake_case = self.activation(lowercase_) return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_) -> Optional[int]: super().__init__() __snake_case = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act) __snake_case = config.num_channels def _a ( self , lowercase_) -> Any: __snake_case = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.') __snake_case = self.embedder(lowercase_) return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ = 2) -> Union[str, Any]: super().__init__() __snake_case = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_) __snake_case = nn.BatchNormad(lowercase_) def _a ( self , lowercase_) -> Tensor: __snake_case = self.convolution(lowercase_) __snake_case = self.normalization(lowercase_) return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_) -> List[str]: super().__init__() __snake_case = nn.AdaptiveAvgPoolad((1, 1)) __snake_case = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.Sigmoid() , ) def _a ( self , lowercase_) -> int: # b c h w -> b c 1 1 __snake_case = self.pooler(lowercase_) __snake_case = self.attention(lowercase_) __snake_case = hidden_state * attention return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1) -> Any: super().__init__() __snake_case = in_channels != out_channels or stride != 1 __snake_case = max(1 , out_channels // config.groups_width) __snake_case = ( 
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity() ) __snake_case = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , ) __snake_case = ACTaFN[config.hidden_act] def _a ( self , lowercase_) -> Optional[int]: __snake_case = hidden_state __snake_case = self.layer(lowercase_) __snake_case = self.shortcut(lowercase_) hidden_state += residual __snake_case = self.activation(lowercase_) return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1) -> Dict: super().__init__() __snake_case = in_channels != out_channels or stride != 1 __snake_case = max(1 , out_channels // config.groups_width) __snake_case = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity() ) __snake_case = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , ) __snake_case = ACTaFN[config.hidden_act] def _a ( self , lowercase_) -> Union[str, Any]: __snake_case = hidden_state __snake_case = self.layer(lowercase_) __snake_case = self.shortcut(lowercase_) hidden_state += residual __snake_case = self.activation(lowercase_) return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 2 , lowercase_ = 2 , ) -> Dict: super().__init__() __snake_case = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer __snake_case = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_) for _ in range(depth - 1)] , ) def _a ( self , lowercase_) -> Union[str, Any]: __snake_case = self.layers(lowercase_) return hidden_state class __lowercase ( nn.Module ): def __init__( self , lowercase_) -> Union[str, Any]: super().__init__() __snake_case = nn.ModuleList([]) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , )) __snake_case = zip(config.hidden_sizes , config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:]): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_)) def _a ( self , lowercase_ , lowercase_ = False , lowercase_ = True) -> BaseModelOutputWithNoAttention: __snake_case = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __snake_case = hidden_states + (hidden_state,) __snake_case = stage_module(lowercase_) if output_hidden_states: __snake_case = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , 
hidden_states=lowercase_) class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = RegNetConfig __UpperCAmelCase = '''regnet''' __UpperCAmelCase = '''pixel_values''' __UpperCAmelCase = True def _a ( self , lowercase_) -> Union[str, Any]: if isinstance(lowercase_ , nn.Convad): nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu') elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm)): nn.init.constant_(module.weight , 1) nn.init.constant_(module.bias , 0) def _a ( self , lowercase_ , lowercase_=False) -> Union[str, Any]: if isinstance(lowercase_ , lowercase_): __snake_case = value UpperCAmelCase__ : List[str] = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n" UpperCAmelCase__ : Dict = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n" @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , lowerCamelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_) -> List[Any]: super().__init__(lowercase_) __snake_case = config __snake_case = RegNetEmbeddings(lowercase_) __snake_case = RegNetEncoder(lowercase_) __snake_case = nn.AdaptiveAvgPoolad((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None) -> BaseModelOutputWithPoolingAndNoAttention: __snake_case = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __snake_case = return_dict if return_dict is not None else self.config.use_return_dict __snake_case = self.embedder(lowercase_) __snake_case = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_) __snake_case = encoder_outputs[0] __snake_case = self.pooler(lowercase_) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , lowerCamelCase__ , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_) -> Any: super().__init__(lowercase_) __snake_case = config.num_labels __snake_case = RegNetModel(lowercase_) # classification head __snake_case = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _a ( self , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , ) -> ImageClassifierOutputWithNoAttention: __snake_case = return_dict if return_dict is not None else self.config.use_return_dict __snake_case = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_) __snake_case = outputs.pooler_output if return_dict else outputs[1] __snake_case = self.classifier(lowercase_) __snake_case = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __snake_case = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __snake_case = 'single_label_classification' else: __snake_case = 'multi_label_classification' if self.config.problem_type == "regression": __snake_case = MSELoss() if self.num_labels == 1: __snake_case = loss_fct(logits.squeeze() , labels.squeeze()) else: __snake_case = loss_fct(lowercase_ , lowercase_) elif self.config.problem_type == "single_label_classification": __snake_case = CrossEntropyLoss() __snake_case = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1)) elif self.config.problem_type == "multi_label_classification": __snake_case = BCEWithLogitsLoss() __snake_case = loss_fct(lowercase_ , lowercase_) if not return_dict: __snake_case = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states)
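# A hedged end-to-end sketch using the `facebook/regnet-y-040` checkpoint named in the
# docstring constants above (downloads weights; the image path is a placeholder):
import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained('facebook/regnet-y-040')
model = RegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
image = Image.open('cat.png')  # placeholder input image
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors='pt')).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"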
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def A ( *snake_case__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' with open(snake_case__ , 'r' ) as fh: fcntl.flock(snake_case__ , fcntl.LOCK_EX ) try: print(*snake_case__ ) finally: fcntl.flock(snake_case__ , fcntl.LOCK_UN ) UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) UpperCAmelCase__ : Any = torch.device("cuda", local_rank) UpperCAmelCase__ : Union[str, Any] = socket.gethostname() UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank UpperCAmelCase__ : Optional[int] = dist.get_rank() UpperCAmelCase__ : List[str] = dist.get_world_size() printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(F"""{gpu} is broken""") raise
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def A ( snake_case__ : str , snake_case__ : int=1 ) -> Union[str, Any]: '''simple docstring''' if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def A ( snake_case__ : List[Any] , snake_case__ : List[str]=0 ) -> List[Any]: '''simple docstring''' __snake_case = [] for old_item in old_list: __snake_case = old_item.replace('in_layers.0' , 'norm1' ) __snake_case = new_item.replace('in_layers.2' , 'conv1' ) __snake_case = new_item.replace('out_layers.0' , 'norm2' ) __snake_case = new_item.replace('out_layers.3' , 'conv2' ) __snake_case = new_item.replace('emb_layers.1' , 'time_emb_proj' ) __snake_case = new_item.replace('skip_connection' , 'conv_shortcut' ) __snake_case = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case__ : Any , snake_case__ : Dict=0 ) -> Dict: '''simple docstring''' __snake_case = [] for old_item in old_list: __snake_case = old_item __snake_case = new_item.replace('norm.weight' , 'group_norm.weight' ) __snake_case = new_item.replace('norm.bias' , 'group_norm.bias' ) __snake_case = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) __snake_case = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) __snake_case = shave_segments(snake_case__ , n_shave_prefix_segments=snake_case__ ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict=None , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None ) -> Optional[int]: '''simple docstring''' assert isinstance(snake_case__ , snake_case__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __snake_case = old_checkpoint[path] __snake_case = old_tensor.shape[0] // 3 __snake_case = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __snake_case = old_tensor.shape[0] // config['num_head_channels'] // 3 __snake_case = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __snake_case , __snake_case , __snake_case = old_tensor.split(channels // num_heads , dim=1 ) __snake_case = query.reshape(snake_case__ ) __snake_case = key.reshape(snake_case__ ) __snake_case = value.reshape(snake_case__ ) for path in paths: __snake_case = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __snake_case = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) __snake_case = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) __snake_case = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: __snake_case = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __snake_case = old_checkpoint[path['old']][:, :, 0] else: __snake_case = old_checkpoint[path['old']] def A ( snake_case__ : List[str] , snake_case__ : Tuple ) -> str: '''simple docstring''' __snake_case = {} __snake_case = checkpoint['time_embed.0.weight'] __snake_case = checkpoint['time_embed.0.bias'] __snake_case = checkpoint['time_embed.2.weight'] __snake_case = checkpoint['time_embed.2.bias'] __snake_case = checkpoint['input_blocks.0.0.weight'] __snake_case = checkpoint['input_blocks.0.0.bias'] __snake_case = checkpoint['out.0.weight'] __snake_case = checkpoint['out.0.bias'] __snake_case = checkpoint['out.2.weight'] __snake_case = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only __snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) __snake_case = { layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the middle blocks only __snake_case = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) __snake_case = { layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key] for layer_id in range(snake_case__ ) } # Retrieves the keys for the output blocks only __snake_case = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) __snake_case = { layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key] for layer_id in range(snake_case__ ) } for i in range(1 , snake_case__ ): __snake_case = (i - 1) // (config['num_res_blocks'] + 1) __snake_case = (i - 1) % (config['num_res_blocks'] + 1) __snake_case = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key] __snake_case = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key] if f"input_blocks.{i}.0.op.weight" in checkpoint: __snake_case = checkpoint[ f"input_blocks.{i}.0.op.weight" ] __snake_case = checkpoint[ f"input_blocks.{i}.0.op.bias" ] continue __snake_case = renew_resnet_paths(snake_case__ ) __snake_case = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"} __snake_case = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path, resnet_op] , config=snake_case__ ) if len(snake_case__ ): __snake_case = renew_attention_paths(snake_case__ ) __snake_case = { 'old': f"input_blocks.{i}.1", 'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}", } __snake_case = { f"input_blocks.{i}.1.qkv.bias": { 'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", 'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", 'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"input_blocks.{i}.1.qkv.weight": { 'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", 'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", 'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case__ , config=snake_case__ , ) __snake_case = middle_blocks[0] __snake_case = middle_blocks[1] __snake_case = middle_blocks[2] __snake_case = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) __snake_case = renew_resnet_paths(snake_case__ ) assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , config=snake_case__ ) __snake_case = renew_attention_paths(snake_case__ ) __snake_case = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , attention_paths_to_split=snake_case__ , config=snake_case__ ) for i in range(snake_case__ ): __snake_case = i // (config['num_res_blocks'] + 1) __snake_case = i % (config['num_res_blocks'] + 1) __snake_case = [shave_segments(snake_case__ , 2 ) for name in output_blocks[i]] __snake_case = {} for layer in output_block_layers: __snake_case , __snake_case = layer.split('.' 
)[0], shave_segments(snake_case__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case__ ) else: __snake_case = [layer_name] if len(snake_case__ ) > 1: __snake_case = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key] __snake_case = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key] __snake_case = renew_resnet_paths(snake_case__ ) __snake_case = renew_resnet_paths(snake_case__ ) __snake_case = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , config=snake_case__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __snake_case = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) __snake_case = checkpoint[ f"output_blocks.{i}.{index}.conv.weight" ] __snake_case = checkpoint[ f"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(snake_case__ ) == 2: __snake_case = [] if len(snake_case__ ): __snake_case = renew_attention_paths(snake_case__ ) __snake_case = { 'old': f"output_blocks.{i}.1", 'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}", } __snake_case = { f"output_blocks.{i}.1.qkv.bias": { 'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", 'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", 'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, f"output_blocks.{i}.1.qkv.weight": { 'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", 'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", 'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( snake_case__ , snake_case__ , snake_case__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=snake_case__ , ) else: __snake_case = renew_resnet_paths(snake_case__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __snake_case = '.'.join(['output_blocks', str(snake_case__ ), path['old']] ) __snake_case = '.'.join(['up_blocks', str(snake_case__ ), 'resnets', str(snake_case__ ), path['new']] ) __snake_case = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") UpperCAmelCase__ : int = parser.parse_args() UpperCAmelCase__ : Optional[int] = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCAmelCase__ : Optional[Any] = json.loads(f.read()) UpperCAmelCase__ : str = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCAmelCase__ : Tuple = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCAmelCase__ : Tuple = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) UpperCAmelCase__ : Optional[Any] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) UpperCAmelCase__ : List[str] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
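# A self-contained, hedged sketch of the segment-shaving helper defined at the
# top of this script (shown here under the descriptive name `shave_segments`):
# it drops leading (or, for negative n, trailing) dot-separated path segments.
def shave_segments(path: str, n_shave_prefix_segments: int = 1) -> str:
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    return ".".join(path.split(".")[:n_shave_prefix_segments])


assert shave_segments("input_blocks.3.0.in_layers.0", 2) == "0.in_layers.0"
assert shave_segments("input_blocks.3.0.in_layers.0", -1) == "input_blocks.3.0.in_layers"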
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve the direct video URL through downloadgram.net and return the raw video bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
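# A toy, hedged sketch of the lazy-import idea behind `_LazyModule`, using
# module-level __getattr__ (PEP 562): a submodule is imported only when one of
# its exported names is first accessed. Names here are illustrative.
import importlib

_lazy_structure = {"json": ["dumps", "loads"]}


def __getattr__(name):
    for module_name, exported in _lazy_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")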
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __lowercase : def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]: __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def _a ( self) -> Union[str, Any]: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length]) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __snake_case = ids_tensor([self.batch_size] , self.num_choices) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , ) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]: __snake_case = OpenLlamaModel(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_) __snake_case = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , 
lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]: __snake_case = True __snake_case = OpenLlamaModel(lowercase_) model.to(lowercase_) model.eval() __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) __snake_case = model(lowercase_ , attention_mask=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str: __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]: __snake_case = True __snake_case = True __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() # first forward pass __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) __snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size) __snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1) __snake_case = torch.cat([input_mask, next_mask] , dim=-1) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3)) def _a ( self) -> Optional[Any]: __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else () __UpperCAmelCase = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if 
is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def _a ( self) -> Tuple: __snake_case = OpenLlamaModelTester(self) __snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7) def _a ( self) -> int: self.config_tester.run_common_tests() def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'single_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> int: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'multi_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def _a ( self) -> List[Any]: pass @parameterized.expand([('linear',), ('dynamic',)]) def _a ( self , lowercase_) -> Optional[Any]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = ids_tensor([1, 1_0] , config.vocab_size) __snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = OpenLlamaModel(lowercase_) original_model.to(lowercase_) original_model.eval() __snake_case = original_model(lowercase_).last_hidden_state __snake_case = original_model(lowercase_).last_hidden_state set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = {'type': scaling_type, 'factor': 10.0} __snake_case = OpenLlamaModel(lowercase_) scaled_model.to(lowercase_) scaled_model.eval() 
__snake_case = scaled_model(lowercase_).last_hidden_state __snake_case = scaled_model(lowercase_).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
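# A standalone sketch of the "linear" RoPE scaling idea the parameterized test
# above exercises: positions are divided by the scaling factor before the
# rotary angles are computed, which stretches the usable context window.
# Hypothetical minimal math, not the modeling code itself.
import torch


def rope_angles(positions, dim=8, base=10000.0, factor=1.0):
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)


pos = torch.arange(4)
print(rope_angles(pos, factor=1.0)[1])   # angles for position 1, unscaled
print(rope_angles(pos, factor=10.0)[1])  # same position, angles shrunk 10x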
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__) class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = 8 , **lowercase_ , ) -> None: super().__init__(**lowercase_) __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_pad __snake_case = pad_size def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_) -> np.ndarray: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _a ( self , lowercase_ , lowercase_ , lowercase_ = None) -> List[str]: __snake_case , __snake_case = get_image_size(lowercase_) __snake_case = (old_height // size + 1) * size - old_height __snake_case = (old_width // size + 1) * size - old_width return pad(lowercase_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase_) def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> int: __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_pad if do_pad is not None else self.do_pad __snake_case = pad_size if pad_size is not None else self.pad_size __snake_case = make_list_of_images(lowercase_) if not valid_images(lowercase_): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') # All transformations expect numpy arrays. __snake_case = [to_numpy_array(lowercase_) for image in images] if do_rescale: __snake_case = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images] if do_pad: __snake_case = [self.pad(lowercase_ , size=lowercase_) for image in images] __snake_case = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images] __snake_case = {'pixel_values': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
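# A standalone numpy demo of the padding rule implemented by `pad` above: each
# spatial dimension grows to the next multiple of `size` (note the formula
# always adds at least one row/column, even when the dimension is already a
# multiple of `size`).
import numpy as np

size = 8
image = np.zeros((13, 13))
pad_height = (image.shape[0] // size + 1) * size - image.shape[0]  # 3
pad_width = (image.shape[1] // size + 1) * size - image.shape[1]   # 3
padded = np.pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric")
assert padded.shape == (16, 16)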
def is_automorphic_number(number: int) -> bool:
    """Check whether `number` is automorphic, i.e. its square ends in the digits of `number` itself (e.g. 76**2 == 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
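# A quick demonstration: 5**2 = 25, 6**2 = 36, 76**2 = 5776 and
# 890625**2 = 793212890625 all end in the original digits, while 7**2 = 49
# does not.
for n in (5, 6, 76, 890625):
    assert is_automorphic_number(n)
assert not is_automorphic_number(7)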
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() UpperCAmelCase__ : Any = logging.get_logger(__name__) UpperCAmelCase__ : int = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } UpperCAmelCase__ : str = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def A ( snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' for attribute in key.split('.' ): __snake_case = getattr(snake_case__ , snake_case__ ) if weight_type is not None: __snake_case = getattr(snake_case__ , snake_case__ ).shape else: __snake_case = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __snake_case = value elif weight_type == "weight_g": __snake_case = value elif weight_type == "weight_v": __snake_case = value elif weight_type == "bias": __snake_case = value else: __snake_case = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def A ( snake_case__ : List[Any] , snake_case__ : Optional[Any] ) -> Any: '''simple docstring''' __snake_case = [] __snake_case = fairseq_model.state_dict() __snake_case = hf_model.feature_extractor for name, value in fairseq_dict.items(): __snake_case = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , ) __snake_case = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __snake_case = True if "*" in mapped_key: __snake_case = name.split(snake_case__ )[0].split('.' 
)[-2] __snake_case = mapped_key.replace('*' , snake_case__ ) if "weight_g" in name: __snake_case = 'weight_g' elif "weight_v" in name: __snake_case = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __snake_case = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case = 'weight' else: __snake_case = None set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) continue if not is_used: unused_weights.append(snake_case__ ) logger.warning(f"Unused weights: {unused_weights}" ) def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : List[str] ) -> Dict: '''simple docstring''' __snake_case = full_name.split('conv_layers.' )[-1] __snake_case = name.split('.' ) __snake_case = int(items[0] ) __snake_case = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __snake_case = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(snake_case__ ) @torch.no_grad() def A ( snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : int=None ) -> List[str]: '''simple docstring''' # load the pre-trained checkpoints __snake_case = torch.load(snake_case__ ) __snake_case = WavLMConfigOrig(checkpoint['cfg'] ) __snake_case = WavLMOrig(snake_case__ ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __snake_case = WavLMConfig.from_pretrained(snake_case__ ) else: __snake_case = WavLMConfig() __snake_case = WavLMModel(snake_case__ ) recursively_load_weights(snake_case__ , snake_case__ ) hf_wavlm.save_pretrained(snake_case__ ) if __name__ == "__main__": UpperCAmelCase__ : str = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
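# A tiny, hedged illustration of the "*" wildcard renaming driven by MAPPING
# above: the layer index parsed out of the fairseq key is substituted for "*"
# to form the HF key. Values here are illustrative.
mapped_key = "encoder.layers.*.attention.k_proj"
layer_index = "7"  # e.g. recovered from "encoder.layers.7.self_attn.k_proj"
print(mapped_key.replace("*", layer_index))  # encoder.layers.7.attention.k_proj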
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))


def swish(vector: np.ndarray) -> np.ndarray:
    """Element-wise swish (SiLU) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
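# A quick numeric check of the two activations above; swish (also called SiLU)
# is x * sigmoid(x), so it approaches 0 for large negative x and x for large
# positive x.
v = np.array([-5.0, 0.0, 5.0])
print(sigmoid(v))  # ~[0.0067, 0.5, 0.9933]
print(swish(v))    # ~[-0.0335, 0.0, 4.9665]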
import os import time import numpy as np import onnxruntime as ort UpperCAmelCase__ : int = "1" UpperCAmelCase__ : Optional[Any] = "0" UpperCAmelCase__ : Tuple = "1" UpperCAmelCase__ : List[str] = ort.SessionOptions() UpperCAmelCase__ : str = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print("Create inference session...") UpperCAmelCase__ : List[str] = ["TensorrtExecutionProvider", "CUDAExecutionProvider"] UpperCAmelCase__ : Union[str, Any] = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider) UpperCAmelCase__ : Union[str, Any] = ort.RunOptions() UpperCAmelCase__ : str = 1_28 UpperCAmelCase__ : Union[str, Any] = 1 UpperCAmelCase__ : Optional[int] = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ : Dict = np.ones((batch, sequence), dtype=np.intaa) UpperCAmelCase__ : str = np.ones((batch, sequence), dtype=np.intaa) print("Warm up phase...") sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("Start inference...") UpperCAmelCase__ : List[str] = time.time() UpperCAmelCase__ : Dict = 20_00 UpperCAmelCase__ : str = {} for iter in range(max_iters): UpperCAmelCase__ : int = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 10_00 / max_iters))
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1 (p itself should be prime)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    if p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
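# A sanity check of the test above: 2**7 - 1 = 127 is prime, while
# 2**11 - 1 = 2047 = 23 * 89 is composite.
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False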
# HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers UpperCAmelCase__ : Optional[int] = float("nan") class __lowercase : def __init__( self , lowercase_) -> Optional[Any]: __snake_case = sys.stdout __snake_case = open(lowercase_ , 'a') def __getattr__( self , lowercase_) -> Tuple: return getattr(self.stdout , lowercase_) def _a ( self , lowercase_) -> str: self.stdout.write(lowercase_) # strip tqdm codes self.file.write(re.sub(r'^.*\r' , '' , lowercase_ , 0 , re.M)) def A ( snake_case__ : Union[str, Any]=80 , snake_case__ : str=False ) -> Any: '''simple docstring''' __snake_case = [] # deal with critical env vars __snake_case = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: __snake_case = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(f"{key}={val}" ) # python executable (not always needed if the script is executable) __snake_case = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes __snake_case = [] __snake_case = '' while len(snake_case__ ) > 0: current_line += f"{cmd.pop(0 )} " if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) __snake_case = '' return "\\\n".join(snake_case__ ) def A ( snake_case__ : Any , snake_case__ : Optional[int] ) -> Dict: '''simple docstring''' # unwrap multi-line input __snake_case = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own __snake_case = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += f" --output_dir {output_dir}" # ensure we have --overwrite_output_dir __snake_case = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def A ( snake_case__ : Tuple , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : int , snake_case__ : List[str] ) -> Dict: '''simple docstring''' # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) __snake_case = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams __snake_case = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / f"log.{prefix}.stdout.txt" , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / f"log.{prefix}.stderr.txt" , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(f"{output_dir}/all_results.json" , 'r' , encoding='utf-8' ) as f: __snake_case = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def A ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : Dict , ) -> int: '''simple docstring''' __snake_case = [] __snake_case = [] __snake_case = f"{id}: {variation:<{longest_variation_len}}" __snake_case = f"{preamble}: " __snake_case = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): __snake_case = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) __snake_case = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" __snake_case = f"\33[2K\r{outcome}" if len(snake_case__ ) > 0: __snake_case = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} __snake_case = round(mean_metrics[target_metric_key] , 2 ) __snake_case = f"{outcome} {mean_target}" if len(snake_case__ ) > 1: results_str += f" {tuple(round(snake_case__ , 2 ) for x in results )}" print(snake_case__ ) __snake_case = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def A ( ) -> Optional[Any]: '''simple docstring''' __snake_case = torch.cuda.get_device_properties(torch.device('cuda' ) ) return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n" def A ( snake_case__ : str , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : Tuple ) -> Union[str, Any]: '''simple docstring''' __snake_case = pd.DataFrame(snake_case__ ) __snake_case = 'variation' __snake_case = 'diff_%' __snake_case = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan __snake_case = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel __snake_case = df.loc[df[target_metric_key] != 
nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): __snake_case = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns __snake_case = [variation_key, target_metric_key, diff_key, *report_metric_keys] __snake_case = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize __snake_case = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible __snake_case = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) __snake_case = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) __snake_case = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def A ( ) -> str: '''simple docstring''' __snake_case = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) __snake_case = parser.parse_args() __snake_case = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) __snake_case = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations __snake_case = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty __snake_case = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) __snake_case = max(len(snake_case__ ) for x in variations ) # split wanted keys __snake_case = args.report_metric_keys.split() # capture prints into a log file for convenience __snake_case = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt" print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt" ) print(f"and this script's output is also piped into {report_fn}" ) __snake_case = Tee(snake_case__ ) print(f"\n*** Running {len(snake_case__ )} benchmarks:" ) print(f"Base command: {' '.join(snake_case__ )}" ) __snake_case = 'variation' __snake_case = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): __snake_case = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
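# A standalone sketch of how the variation dimensions documented at the top of
# this file expand into the cartesian product of command-line suffixes,
# mirroring the itertools.product call in main():
import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16', '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']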
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"] UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Any = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : int = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class __lowercase : def __init__( self , lowercase_ , lowercase_=3 , lowercase_=3_2 , lowercase_=3 , lowercase_=1_0 , lowercase_=[8, 1_6, 3_2, 6_4] , lowercase_=[1, 1, 2, 1] , lowercase_=True , lowercase_=True , lowercase_="relu" , lowercase_=3 , lowercase_=None , lowercase_=["stage2", "stage3", "stage4"] , lowercase_=[2, 3, 4] , lowercase_=1 , ) -> Tuple: __snake_case = parent __snake_case = batch_size __snake_case = image_size __snake_case = num_channels __snake_case = embeddings_size __snake_case = hidden_sizes __snake_case = depths __snake_case = is_training __snake_case = use_labels __snake_case = hidden_act __snake_case = num_labels __snake_case = scope __snake_case = len(lowercase_) __snake_case = out_features __snake_case = out_indices __snake_case = num_groups def _a ( self) -> Union[str, Any]: __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.num_labels) __snake_case = self.get_config() return config, pixel_values, labels def _a ( self) -> Optional[int]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def _a ( self , lowercase_ , lowercase_ , lowercase_) -> int: __snake_case = BitModel(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def _a ( self , lowercase_ , lowercase_ , lowercase_) -> str: __snake_case = self.num_labels __snake_case = BitForImageClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _a ( self , lowercase_ , lowercase_ , lowercase_) -> int: __snake_case = BitBackbone(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None __snake_case = None __snake_case = BitBackbone(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_) # 
verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def _a ( self) -> Optional[int]: __snake_case = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case = config_and_inputs __snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __UpperCAmelCase = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def _a ( self) -> int: __snake_case = BitModelTester(self) __snake_case = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_) def _a ( self) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _a ( self) -> Optional[Any]: return @unittest.skip(reason='Bit does not output attentions') def _a ( self) -> str: pass @unittest.skip(reason='Bit does not use inputs_embeds') def _a ( self) -> List[Any]: pass @unittest.skip(reason='Bit does not support input and output embeddings') def _a ( self) -> Dict: pass def _a ( self) -> Dict: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(lowercase_) __snake_case = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case = [*signature.parameters.keys()] __snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase_) def _a ( self) -> List[str]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> Optional[int]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowercase_) def _a ( self) -> Any: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case = model_class(config=lowercase_) for name, module in model.named_modules(): if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) self.assertTrue( torch.all(module.bias == 0) , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) def _a ( self) -> str: def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_): __snake_case = model_class(lowercase_) model.to(lowercase_) model.eval() with torch.no_grad(): __snake_case = model(**self._prepare_for_class(lowercase_ , lowercase_)) __snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else 
outputs.hidden_states __snake_case = self.model_tester.num_stages self.assertEqual(len(lowercase_) , expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: __snake_case = layer_type __snake_case = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_) @unittest.skip(reason='Bit does not use feedforward chunking') def _a ( self) -> Union[str, Any]: pass def _a ( self) -> Optional[int]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) @slow def _a ( self) -> List[str]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case = BitModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def A ( ) -> List[str]: '''simple docstring''' __snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __lowercase ( unittest.TestCase ): @cached_property def _a ( self) -> str: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def _a ( self) -> List[Any]: __snake_case = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(lowercase_) __snake_case = self.default_image_processor __snake_case = prepare_img() __snake_case = image_processor(images=lowercase_ , return_tensors='pt').to(lowercase_) # forward pass with torch.no_grad(): __snake_case = model(**lowercase_) # verify the logits __snake_case = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , lowercase_) __snake_case = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(lowercase_) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4)) @require_torch class __lowercase ( lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = (BitBackbone,) if is_torch_available() else () __UpperCAmelCase = BitConfig __UpperCAmelCase = False def _a ( self) -> List[Any]: __snake_case = BitModelTester(self)
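# A minimal sketch of the backbone contract the checks above pin down, run
# outside the test harness. The public transformers names BitConfig and
# BitBackbone are assumed; this snippet anonymizes its own class names.
import torch
from transformers import BitBackbone, BitConfig

config = BitConfig(out_features=["stage2", "stage3", "stage4"])
model = BitBackbone(config).eval()
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224))
print(len(outputs.feature_maps))  # one feature map per requested stage -> 3
print(model.channels)  # channel count of each returned stage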
676
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Import all dummy objects so every pipeline name below stays defined
    # when torch/transformers are missing (the original only pulled in two).
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
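# A minimal sketch of the two-stage usage these exports imply: the prior maps
# text to image embeddings, the main pipeline decodes them. The checkpoint ids
# are the public Kandinsky 2.1 releases and are an assumption here.
from diffusers import KandinskyPipeline, KandinskyPriorPipeline

prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")

prompt = "a photo of a red panda"
image_embeds, negative_image_embeds = prior(prompt).to_tuple()
image = pipe(prompt, image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]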
676
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : Dict = { "microsoft/unispeech-sat-base-100h-libri-ft": ( "https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''unispeech-sat''' def __init__( self , lowercase_=3_2 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=0.02 , lowercase_=1e-5 , lowercase_="group" , lowercase_="gelu" , lowercase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase_=(5, 2, 2, 2, 2, 2, 2) , lowercase_=(1_0, 3, 3, 3, 3, 2, 2) , lowercase_=False , lowercase_=1_2_8 , lowercase_=1_6 , lowercase_=False , lowercase_=True , lowercase_=0.05 , lowercase_=1_0 , lowercase_=2 , lowercase_=0.0 , lowercase_=1_0 , lowercase_=0 , lowercase_=3_2_0 , lowercase_=2 , lowercase_=0.1 , lowercase_=1_0_0 , lowercase_=2_5_6 , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_="mean" , lowercase_=False , lowercase_=False , lowercase_=2_5_6 , lowercase_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase_=(5, 3, 3, 1, 1) , lowercase_=(1, 2, 3, 1, 1) , lowercase_=5_1_2 , lowercase_=0 , lowercase_=1 , lowercase_=2 , lowercase_=5_0_4 , **lowercase_ , ) -> List[str]: super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_) __snake_case = hidden_size __snake_case = feat_extract_norm __snake_case = feat_extract_activation __snake_case = list(lowercase_) __snake_case = list(lowercase_) __snake_case = list(lowercase_) __snake_case = conv_bias __snake_case = num_conv_pos_embeddings __snake_case = num_conv_pos_embedding_groups __snake_case = len(self.conv_dim) __snake_case = num_hidden_layers __snake_case = intermediate_size __snake_case = hidden_act __snake_case = num_attention_heads __snake_case = hidden_dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = feat_proj_dropout __snake_case = final_dropout __snake_case = layerdrop __snake_case = layer_norm_eps __snake_case = initializer_range __snake_case = vocab_size __snake_case = num_clusters __snake_case = do_stable_layer_norm __snake_case = use_weighted_layer_sum if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`," F" `len(config.conv_kernel) = {len(self.conv_kernel)}`.") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __snake_case = apply_spec_augment __snake_case = mask_time_prob __snake_case = mask_time_length __snake_case = mask_time_min_masks __snake_case = mask_feature_prob __snake_case = mask_feature_length __snake_case = mask_feature_min_masks # parameters for pretraining with codevector quantized representations __snake_case = num_codevectors_per_group __snake_case = num_codevector_groups __snake_case = contrastive_logits_temperature __snake_case = feat_quantizer_dropout __snake_case = num_negatives __snake_case = codevector_dim __snake_case = proj_codevector_dim __snake_case = diversity_loss_weight # ctc loss __snake_case = ctc_loss_reduction __snake_case = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. __snake_case = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __snake_case = list(lowercase_) __snake_case = list(lowercase_) __snake_case = list(lowercase_) __snake_case = xvector_output_dim @property def _a ( self) -> Optional[int]: return functools.reduce(operator.mul , self.conv_stride , 1)
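# A minimal sketch of the downsampling ratio computed by the final property
# above, i.e. the product of conv_stride: with the defaults (5, 2, 2, 2, 2, 2, 2)
# that is 5 * 2**6 = 320 waveform samples per encoder frame. The public class
# and property names (UniSpeechSatConfig, inputs_to_logits_ratio) are assumed,
# since this snippet anonymizes them.
from transformers import UniSpeechSatConfig

config = UniSpeechSatConfig()
print(config.inputs_to_logits_ratio)  # 320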
676
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def A ( snake_case__ : List[Any] ) -> Any: '''simple docstring''' __snake_case = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __snake_case = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __snake_case = 4 __snake_case = 48 __snake_case = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __snake_case = [6, 6, 6, 6] __snake_case = 60 __snake_case = [6, 6, 6, 6] __snake_case = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __snake_case = 4 __snake_case = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __snake_case = 1 __snake_case = 1 __snake_case = 126 __snake_case = 7 __snake_case = 255.0 __snake_case = '' return config def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: __snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: __snake_case = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: __snake_case = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: __snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __snake_case = name.replace('attn' , 'attention.self' ) if "norm1" in name: __snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: __snake_case = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: __snake_case = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: __snake_case = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: __snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: __snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": __snake_case = 'layernorm.weight' if name == "norm.bias": __snake_case = 'layernorm.bias' if "conv_first" in name: __snake_case = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __snake_case = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: __snake_case = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: __snake_case = name.replace('upsample.2' , 'upsample.convolution_1' ) __snake_case = 'upsample.' 
+ name elif config.upsampler == "pixelshuffledirect": __snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) __snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: __snake_case = 'swin2sr.' + name return name def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict: '''simple docstring''' for key in orig_state_dict.copy().keys(): __snake_case = orig_state_dict.pop(snake_case__ ) if "qkv" in key: __snake_case = key.split('.' ) __snake_case = int(key_split[1] ) __snake_case = int(key_split[4] ) __snake_case = config.embed_dim if "weight" in key: __snake_case = val[:dim, :] __snake_case = val[dim : dim * 2, :] __snake_case = val[-dim:, :] else: __snake_case = val[:dim] __snake_case = val[dim : dim * 2] __snake_case = val[-dim:] pass else: __snake_case = val return orig_state_dict def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple: '''simple docstring''' __snake_case = get_config(snake_case__ ) __snake_case = SwinaSRForImageSuperResolution(snake_case__ ) model.eval() __snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' ) __snake_case = convert_state_dict(snake_case__ , snake_case__ ) __snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0: raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict" ) # verify values __snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' __snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) __snake_case = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __snake_case = 126 if 'Jpeg' in checkpoint_url else 256 __snake_case = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __snake_case = transforms(snake_case__ ).unsqueeze(0 ) if config.num_channels == 1: __snake_case = pixel_values[:, 0, :, :].unsqueeze(1 ) __snake_case = model(snake_case__ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 512, 512] ) __snake_case = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 512, 512] ) __snake_case = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == 
expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 ) print('Looks ok!' ) __snake_case = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } __snake_case = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(snake_case__ ) if push_to_hub: model.push_to_hub(f"caidas/{model_name}" ) processor.push_to_hub(f"caidas/{model_name}" ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") UpperCAmelCase__ : Optional[Any] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
676
1
def A(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a '0b'-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
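# A quick sanity check of the helper above (the dump anonymizes its name to A):
# 25 = 0b011001 and 57 = 0b111001, so ANDing them bit by bit gives 0b011001.
print(A(25, 57))  # "0b011001"
print(int(A(25, 57), 2) == (25 & 57))  # True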
676
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
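# With the lazy module in place, users import as usual; a minimal sketch, where
# the checkpoint id is the public microsoft release (an assumption here).
from transformers import TrOCRProcessor

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")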
676
1
from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : Tuple = { "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json", # See all Marian models at https://huggingface.co/models?filter=marian } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''marian''' __UpperCAmelCase = ['''past_key_values'''] __UpperCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowercase_=5_8_1_0_1 , lowercase_=None , lowercase_=1_0_2_4 , lowercase_=1_2 , lowercase_=4_0_9_6 , lowercase_=1_6 , lowercase_=1_2 , lowercase_=4_0_9_6 , lowercase_=1_6 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_=True , lowercase_="gelu" , lowercase_=1_0_2_4 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=5_8_1_0_0 , lowercase_=False , lowercase_=5_8_1_0_0 , lowercase_=0 , lowercase_=0 , lowercase_=True , **lowercase_ , ) -> Optional[Any]: __snake_case = vocab_size __snake_case = decoder_vocab_size or vocab_size __snake_case = max_position_embeddings __snake_case = d_model __snake_case = encoder_ffn_dim __snake_case = encoder_layers __snake_case = encoder_attention_heads __snake_case = decoder_ffn_dim __snake_case = decoder_layers __snake_case = decoder_attention_heads __snake_case = dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = activation_function __snake_case = init_std __snake_case = encoder_layerdrop __snake_case = decoder_layerdrop __snake_case = use_cache __snake_case = encoder_layers __snake_case = scale_embedding # scale factor will be sqrt(d_model) if True __snake_case = share_encoder_decoder_embeddings super().__init__( pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , ) class __lowercase ( lowerCamelCase__ ): @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs def _a ( self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __snake_case = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ]) if self.use_past: __snake_case = {0: 'batch'} __snake_case = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __snake_case = {0: 'batch', 1: 'decoder_sequence'} __snake_case = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowercase_ , direction='inputs') elif self.task == "causal-lm": # TODO: figure this case out. 
__snake_case = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ]) if self.use_past: __snake_case , __snake_case = self.num_layers for i in range(lowercase_): __snake_case = {0: 'batch', 2: 'past_sequence + sequence'} __snake_case = {0: 'batch', 2: 'past_sequence + sequence'} else: __snake_case = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ]) return common_inputs @property # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs def _a ( self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: __snake_case = super().outputs else: __snake_case = super(lowercase_ , self).outputs if self.use_past: __snake_case , __snake_case = self.num_layers for i in range(lowercase_): __snake_case = {0: 'batch', 2: 'past_sequence + sequence'} __snake_case = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def _a ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]: __snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) # Generate decoder inputs __snake_case = seq_length if not self.use_past else 1 __snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) __snake_case = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} __snake_case = dict(**lowercase_ , **lowercase_) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch __snake_case , __snake_case = common_inputs['input_ids'].shape __snake_case = common_inputs['decoder_input_ids'].shape[1] __snake_case , __snake_case = self.num_attention_heads __snake_case = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case = decoder_seq_length + 3 __snake_case = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __snake_case = torch.cat( [common_inputs['decoder_attention_mask'], torch.ones(lowercase_ , lowercase_)] , dim=1) __snake_case = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __snake_case , __snake_case = self.num_layers __snake_case = min(lowercase_ , lowercase_) __snake_case = max(lowercase_ , lowercase_) - min_num_layers __snake_case = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(lowercase_): common_inputs["past_key_values"].append( ( torch.zeros(lowercase_), torch.zeros(lowercase_), torch.zeros(lowercase_), torch.zeros(lowercase_), )) # TODO: test this. 
__snake_case = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(lowercase_ , lowercase_): common_inputs["past_key_values"].append((torch.zeros(lowercase_), torch.zeros(lowercase_))) return common_inputs def _a ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]: __snake_case = self._generate_dummy_inputs_for_encoder_and_decoder( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch __snake_case , __snake_case = common_inputs['input_ids'].shape # Not using the same length for past_key_values __snake_case = seqlen + 2 __snake_case , __snake_case = self.num_layers __snake_case , __snake_case = self.num_attention_heads __snake_case = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __snake_case = common_inputs['attention_mask'].dtype __snake_case = torch.cat( [common_inputs['attention_mask'], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_)] , dim=1) __snake_case = [ (torch.zeros(lowercase_), torch.zeros(lowercase_)) for _ in range(lowercase_) ] return common_inputs def _a ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __snake_case = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __snake_case = tokenizer.num_special_tokens_to_add(lowercase_) __snake_case = compute_effective_axis_dimension( lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_) # Generate dummy inputs according to compute batch and sequence __snake_case = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size __snake_case = dict(tokenizer(lowercase_ , return_tensors=lowercase_)) return common_inputs def _a ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: __snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_) else: __snake_case = self._generate_dummy_inputs_for_causal_lm( lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_) return common_inputs def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Any: if self.task in ["default", "seq2seq-lm"]: __snake_case = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_) else: __snake_case = super(lowercase_ , self)._flatten_past_key_values_( lowercase_ , lowercase_ , lowercase_ , lowercase_) @property def _a ( self) -> float: return 1e-4
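# A minimal sketch of exercising the dummy-input generation above. The upstream
# class name MarianOnnxConfig is assumed (this snippet anonymizes it), and the
# tokenizer checkpoint is the one referenced in the config map.
from transformers import AutoTokenizer, MarianConfig
from transformers.models.marian.configuration_marian import MarianOnnxConfig
from transformers.utils import TensorType

onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print({name: tensor.shape for name, tensor in dummy.items()})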
676
from __future__ import annotations


class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The extraction dropped the attribute targets for nodes 2..9; the wiring
    # below follows the upstream TheAlgorithms example and is an assumption.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
676
1
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration UpperCAmelCase__ : List[str] = 5_00_00 UpperCAmelCase__ : List[Any] = 50_00 UpperCAmelCase__ , UpperCAmelCase__ : int = os.path.split(__file__) UpperCAmelCase__ : List[Any] = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def A ( snake_case__ : datasets.Dataset , snake_case__ : Optional[int] ) -> List[Any]: '''simple docstring''' for i in range(snake_case__ ): __snake_case = dataset[i] @get_duration def A ( snake_case__ : datasets.Dataset , snake_case__ : Any , snake_case__ : str ) -> Optional[int]: '''simple docstring''' for i in range(0 , len(snake_case__ ) , snake_case__ ): __snake_case = dataset[i : i + batch_size] @get_duration def A ( snake_case__ : datasets.Dataset , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ) -> List[str]: '''simple docstring''' with dataset.formatted_as(type=snake_case__ ): for i in range(snake_case__ ): __snake_case = dataset[i] @get_duration def A ( snake_case__ : datasets.Dataset , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Tuple ) -> int: '''simple docstring''' with dataset.formatted_as(type=snake_case__ ): for i in range(0 , snake_case__ , snake_case__ ): __snake_case = dataset[i : i + batch_size] def A ( ) -> str: '''simple docstring''' __snake_case = {'num examples': SPEED_TEST_N_EXAMPLES} __snake_case = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted, {'type': 'pandas', 'length': SMALL_TEST}), (read_formatted, {'type': 'torch', 'length': SMALL_TEST}), (read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] __snake_case = [ (read, {'length': SMALL_TEST}), (read, {'length': SPEED_TEST_N_EXAMPLES}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}), (read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1000}), (read_formatted, {'type': 'numpy', 'length': SMALL_TEST}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}), (read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print('generating dataset' ) __snake_case = datasets.Features( {'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} ) __snake_case = generate_example_dataset( os.path.join(snake_case__ , 'dataset.arrow' ) , snake_case__ , num_examples=snake_case__ , seq_shapes={'list': (100,)} , ) print('first set of iterations' ) for func, kwargs in functions: print(func.__name__ , str(snake_case__ ) ) __snake_case = func(snake_case__ , **snake_case__ ) print('shuffling dataset' ) __snake_case = dataset.shuffle() print('Second set of iterations (after shuffling' ) for func, kwargs in functions_shuffled: print('shuffled ' , func.__name__ , str(snake_case__ ) ) __snake_case = func( snake_case__ , **snake_case__ ) with open(snake_case__ , 'wb' ) as f: f.write(json.dumps(snake_case__ 
).encode('utf-8' ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
676
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : int = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''table-transformer''' __UpperCAmelCase = ['''past_key_values'''] __UpperCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') __snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(lowercase_ , lowercase_): __snake_case = backbone_config.get('model_type') __snake_case = CONFIG_MAPPING[backbone_model_type] __snake_case = config_class.from_dict(lowercase_) # set timm attributes to None __snake_case , __snake_case , __snake_case = None, None, None __snake_case = use_timm_backbone __snake_case = backbone_config __snake_case = num_channels __snake_case = num_queries __snake_case = d_model __snake_case = encoder_ffn_dim __snake_case = encoder_layers __snake_case = encoder_attention_heads __snake_case = decoder_ffn_dim __snake_case = decoder_layers __snake_case = decoder_attention_heads __snake_case = dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = activation_function __snake_case = init_std __snake_case = init_xavier_std __snake_case = encoder_layerdrop __snake_case = decoder_layerdrop __snake_case = encoder_layers __snake_case = auxiliary_loss __snake_case = position_embedding_type __snake_case = backbone __snake_case = use_pretrained_backbone __snake_case = dilation # Hungarian matcher __snake_case = class_cost __snake_case = bbox_cost __snake_case = giou_cost # Loss coefficients __snake_case = mask_loss_coefficient __snake_case = dice_loss_coefficient __snake_case = bbox_loss_coefficient __snake_case = giou_loss_coefficient __snake_case = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_) @property def _a ( self) -> int: return self.encoder_attention_heads @property def _a ( self) -> int: return self.d_model class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = version.parse('''1.11''' ) @property def _a ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def _a ( self) -> float: return 1e-5 
@property def _a ( self) -> int: return 1_2
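# A minimal sketch of reading the ONNX export contract defined above. The
# public names TableTransformerConfig / TableTransformerOnnxConfig and the
# standard property names are assumed (this snippet anonymizes them).
from transformers import TableTransformerConfig
from transformers.models.table_transformer.configuration_table_transformer import TableTransformerOnnxConfig

onnx_config = TableTransformerOnnxConfig(TableTransformerConfig())
print(onnx_config.inputs)  # pixel_values / pixel_mask with a dynamic batch axis
print(onnx_config.atol_for_validation)  # 1e-5, matching the property above
print(onnx_config.default_onnx_opset)  # 12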
676
1
from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING UpperCAmelCase__ : int = logging.get_logger(__name__) @add_end_docstrings(lowerCamelCase__ ) class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: super().__init__(*lowercase_ , **lowercase_) self.check_model_type(lowercase_) def _a ( self , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_) -> str: __snake_case , __snake_case = {}, {} if padding is not None: __snake_case = padding if truncation is not None: __snake_case = truncation if top_k is not None: __snake_case = top_k return preprocess_params, {}, postprocess_params def __call__( self , lowercase_ , lowercase_ = None , **lowercase_) -> List[Any]: if isinstance(lowercase_ , (Image.Image, str)) and isinstance(lowercase_ , lowercase_): __snake_case = {'image': image, 'question': question} else: __snake_case = image __snake_case = super().__call__(lowercase_ , **lowercase_) return results def _a ( self , lowercase_ , lowercase_=False , lowercase_=False) -> Tuple: __snake_case = load_image(inputs['image']) __snake_case = self.tokenizer( inputs['question'] , return_tensors=self.framework , padding=lowercase_ , truncation=lowercase_) __snake_case = self.image_processor(images=lowercase_ , return_tensors=self.framework) model_inputs.update(lowercase_) return model_inputs def _a ( self , lowercase_) -> Union[str, Any]: __snake_case = self.model(**lowercase_) return model_outputs def _a ( self , lowercase_ , lowercase_=5) -> Optional[Any]: if top_k > self.model.config.num_labels: __snake_case = self.model.config.num_labels if self.framework == "pt": __snake_case = model_outputs.logits.sigmoid()[0] __snake_case , __snake_case = probs.topk(lowercase_) else: raise ValueError(F"Unsupported framework: {self.framework}") __snake_case = scores.tolist() __snake_case = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(lowercase_ , lowercase_)]
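# A minimal sketch of invoking this pipeline through the public factory. The
# image path reuses the COCO fixture that appears elsewhere in these tests, the
# question is made up, and the model falls back to the task default.
from transformers import pipeline

vqa = pipeline("visual-question-answering")
preds = vqa(
    image="./tests/fixtures/tests_samples/COCO/000000039769.png",
    question="How many cats are there?",
)
print(preds)  # [{"score": ..., "answer": ...}, ...] with at most top_k entries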
676
from maths.prime_check import is_prime


def A(number: int) -> int:
    """Return number + 2 if both number and number + 2 are prime (a twin prime pair), else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
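# A quick check of the twin-prime helper above (anonymized here as A); it
# relies on maths.prime_check from the same repository.
print(A(5))  # 7, since 5 and 7 are both prime
print(A(8))  # -1, since 8 is not prime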
676
1
import argparse import os import re import packaging.version UpperCAmelCase__ : int = "examples/" UpperCAmelCase__ : int = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } UpperCAmelCase__ : List[str] = { "init": "src/diffusers/__init__.py", "setup": "setup.py", } UpperCAmelCase__ : Tuple = "README.md" def A ( snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int ) -> Optional[Any]: '''simple docstring''' with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: __snake_case = f.read() __snake_case , __snake_case = REPLACE_PATTERNS[pattern] __snake_case = replace.replace('VERSION' , snake_case__ ) __snake_case = re_pattern.sub(snake_case__ , snake_case__ ) with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(snake_case__ ) def A ( snake_case__ : int ) -> Any: '''simple docstring''' for folder, directories, fnames in os.walk(snake_case__ ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(snake_case__ , snake_case__ ) , snake_case__ , pattern='examples' ) def A ( snake_case__ : Dict , snake_case__ : Optional[Any]=False ) -> List[str]: '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(snake_case__ , snake_case__ , snake_case__ ) if not patch: update_version_in_examples(snake_case__ ) def A ( ) -> List[Any]: '''simple docstring''' __snake_case = '🤗 Transformers currently provides the following architectures' __snake_case = '1. Want to contribute a new model?' with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f: __snake_case = f.readlines() # Find the start of the list. __snake_case = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __snake_case = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): __snake_case = lines[index].replace( 'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , ) index += 1 with open(snake_case__ , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(snake_case__ ) def A ( ) -> Optional[Any]: '''simple docstring''' with open(REPLACE_FILES['init'] , 'r' ) as f: __snake_case = f.read() __snake_case = REPLACE_PATTERNS['init'][0].search(snake_case__ ).groups()[0] return packaging.version.parse(snake_case__ ) def A ( snake_case__ : str=False ) -> Tuple: '''simple docstring''' __snake_case = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' ) if default_version.is_devrelease: __snake_case = default_version.base_version elif patch: __snake_case = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}" else: __snake_case = f"{default_version.major}.{default_version.minor + 1}.0" # Now let's ask nicely if that's the right one. 
__snake_case = input(f"Which version are you releasing? [{default_version}]" ) if len(snake_case__ ) == 0: __snake_case = default_version print(f"Updating version to {version}." ) global_version_update(snake_case__ , patch=snake_case__ ) def A ( ) -> Any: '''simple docstring''' __snake_case = get_version() __snake_case = f"{current_version.major}.{current_version.minor + 1}.0.dev0" __snake_case = current_version.base_version # Check with the user we got that right. __snake_case = input(f"Which version are we developing now? [{dev_version}]" ) if len(snake_case__ ) == 0: __snake_case = dev_version print(f"Updating version to {version}." ) global_version_update(snake_case__ ) # print("Cleaning main README, don't forget to run `make fix-copies`.") # clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") UpperCAmelCase__ : Optional[int] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
676
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


# The dump anonymized the test name; it must start with "test_" for pytest to
# collect it, so the upstream-style name is restored here.
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
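# For reference, the URL shape the parametrized test asserts, evaluated once:
# the revision defaults to "main" and the path is percent-quoted.
from datasets.utils.hub import hf_hub_url

print(hf_hub_url(repo_id="org-name/dataset-name", path="filename with blanks.csv", revision=None))
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv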
676
1
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) UpperCAmelCase__ : Optional[int] = logging.getLogger() UpperCAmelCase__ : Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class __lowercase ( lowerCamelCase__ ): def _a ( self , lowercase_) -> Tuple: os.makedirs(lowercase_ , exist_ok=lowercase_) __snake_case = {'source': 'What is love ?', 'target': 'life'} __snake_case = {'train': 1_2, 'val': 2, 'test': 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: __snake_case = '\n'.join([contents[field]] * n_lines[split]) with open(os.path.join(lowercase_ , F"{split}.{field}") , 'w') as f: f.write(lowercase_) def _a ( self , lowercase_ , lowercase_ = "pytorch") -> Dict: __snake_case = self.get_auto_remove_tmp_dir() __snake_case = os.path.join(lowercase_ , 'output') __snake_case = os.path.join(lowercase_ , 'data') self._create_dummy_data(data_dir=lowercase_) __snake_case = F"\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n ".split() if gpus > 0: testargs.append(F"--gpus={gpus}") if is_apex_available(): testargs.append('--fp16') else: testargs.append('--gpus=0') testargs.append('--distributed_backend=ddp_cpu') testargs.append('--num_processes=2') __snake_case = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs execute_subprocess_async(lowercase_ , env=self.get_env()) __snake_case = os.path.join(lowercase_ , 'metrics.json') with open(lowercase_) as f: __snake_case = json.load(lowercase_) return result @require_torch_gpu def _a ( self) -> Optional[Any]: __snake_case = self._run_finetune(gpus=1) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2) @require_torch_multi_gpu def _a ( self) -> Tuple: __snake_case = self._run_finetune(gpus=2) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2) @require_torch_gpu @require_ray def _a ( self) -> Union[str, Any]: __snake_case = self._run_finetune(gpus=1 , distributed_retriever='ray') self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2) @require_torch_multi_gpu @require_ray def _a ( self) -> Dict: __snake_case = self._run_finetune(gpus=1 , distributed_retriever='ray') self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2)
676
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCAmelCase__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def A ( snake_case__ : List[Any] ) -> str: '''simple docstring''' for pegasus_name, hf_name in PATTERNS: __snake_case = k.replace(snake_case__ , snake_case__ ) return k def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration: '''simple docstring''' __snake_case = DEFAULTS.copy() cfg_kwargs.update(snake_case__ ) __snake_case = PegasusConfig(**snake_case__ ) __snake_case = PegasusForConditionalGeneration(snake_case__ ) __snake_case = torch_model.model.state_dict() __snake_case = {} for k, v in tf_weights.items(): __snake_case = rename_state_dict_key(snake_case__ ) if new_k not in sd: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if "dense" in k or "proj" in new_k: __snake_case = v.T __snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}" # make sure embedding.padding_idx is respected __snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) __snake_case = mapping['shared.weight'] __snake_case = mapping['shared.weight'] __snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**snake_case__ ) __snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ ) __snake_case = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict: '''simple docstring''' __snake_case = tf.train.list_variables(snake_case__ ) __snake_case = {} __snake_case = ['Adafactor', 'global_step'] for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ): __snake_case = any(pat in name for pat in ignore_name ) if skip_key: continue __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) __snake_case = array return tf_weights def A ( snake_case__ : str , snake_case__ : str ) -> Tuple: '''simple docstring''' # save tokenizer first __snake_case = Path(snake_case__ ).parent.name __snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings'] __snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(snake_case__ ) # convert model 
__snake_case = get_tf_weights_as_numpy(snake_case__ ) __snake_case = task_specific_params[f"summarization_{dataset}"] if dataset == "large": __snake_case = task_specific_params __snake_case = convert_pegasus(snake_case__ , snake_case__ ) torch_model.save_pretrained(snake_case__ ) __snake_case = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' ) if __name__ == "__main__": UpperCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ : int = parser.parse_args() if args.save_dir is None: UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name UpperCAmelCase__ : str = os.path.join("pegasus", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
676
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase__ : Optional[int] = logging.get_logger(__name__) UpperCAmelCase__ : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } UpperCAmelCase__ : List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def A ( snake_case__ : int ) -> int: '''simple docstring''' __snake_case = {} with open(snake_case__ , 'r' ) as file: for line_number, line in enumerate(snake_case__ ): __snake_case = line.strip() if line: __snake_case = line.split() __snake_case = line_number __snake_case = words[0] __snake_case = value return result def A ( snake_case__ : List[str] , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' for attribute in key.split('.' ): __snake_case = getattr(snake_case__ , snake_case__ ) __snake_case = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__ ): __snake_case = PARAM_MAPPING[full_name.split('.' )[-1]] __snake_case = 'param' if weight_type is not None and weight_type != "param": __snake_case = getattr(snake_case__ , snake_case__ ).shape elif weight_type is not None and weight_type == "param": __snake_case = hf_pointer for attribute in hf_param_name.split('.' ): __snake_case = getattr(snake_case__ , snake_case__ ) __snake_case = shape_pointer.shape # let's reduce dimension __snake_case = value[0] else: __snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __snake_case = value elif weight_type == "weight_g": __snake_case = value elif weight_type == "weight_v": __snake_case = value elif weight_type == "bias": __snake_case = value elif weight_type == "param": for attribute in hf_param_name.split('.' ): __snake_case = getattr(snake_case__ , snake_case__ ) __snake_case = value else: __snake_case = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." 
) def A ( snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case__ ): __snake_case = PARAM_MAPPING[full_name.split('.' )[-1]] __snake_case = 'param' if weight_type is not None and weight_type != "param": __snake_case = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __snake_case = '.'.join([key, hf_param_name] ) else: __snake_case = key __snake_case = value if 'lm_head' in full_key else value[0] UpperCAmelCase__ : Tuple = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def A ( snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str]=None , snake_case__ : str=None ) -> str: '''simple docstring''' __snake_case = False for key, mapped_key in MAPPING.items(): __snake_case = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: __snake_case = True if "*" in mapped_key: __snake_case = name.split(snake_case__ )[0].split('.' )[-2] __snake_case = mapped_key.replace('*' , snake_case__ ) if "weight_g" in name: __snake_case = 'weight_g' elif "weight_v" in name: __snake_case = 'weight_v' elif "bias" in name: __snake_case = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case = 'weight' else: __snake_case = None if hf_dict is not None: rename_dict(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) else: set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) return is_used return is_used def A ( snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case = [] __snake_case = fairseq_model.state_dict() __snake_case = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __snake_case = False if "conv_layers" in name: load_conv_layer( snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == 'group' , ) __snake_case = True else: __snake_case = load_wavaveca_layer(snake_case__ , snake_case__ , snake_case__ ) if not is_used: unused_weights.append(snake_case__ ) logger.warning(f"Unused weights: {unused_weights}" ) def A ( snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Dict ) -> Any: '''simple docstring''' __snake_case = full_name.split('conv_layers.' )[-1] __snake_case = name.split('.' ) __snake_case = int(items[0] ) __snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __snake_case = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(snake_case__ ) @torch.no_grad() def A ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : int=None , snake_case__ : Optional[Any]=None , snake_case__ : List[str]=True , snake_case__ : Optional[int]=False ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: __snake_case = WavaVecaConfig.from_pretrained(snake_case__ ) else: __snake_case = WavaVecaConfig() if is_seq_class: __snake_case = read_txt_into_dict(snake_case__ ) __snake_case = idalabel __snake_case = WavaVecaForSequenceClassification(snake_case__ ) __snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) feature_extractor.save_pretrained(snake_case__ ) elif is_finetuned: if dict_path: __snake_case = Dictionary.load(snake_case__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __snake_case = target_dict.pad_index __snake_case = target_dict.bos_index __snake_case = target_dict.eos_index __snake_case = len(target_dict.symbols ) __snake_case = os.path.join(snake_case__ , 'vocab.json' ) if not os.path.isdir(snake_case__ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(snake_case__ ) ) return os.makedirs(snake_case__ , exist_ok=snake_case__ ) __snake_case = target_dict.indices # fairseq has the <pad> and <s> switched __snake_case = 0 __snake_case = 1 with open(snake_case__ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(snake_case__ , snake_case__ ) __snake_case = WavaVecaCTCTokenizer( snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=snake_case__ , ) __snake_case = True if config.feat_extract_norm == 'layer' else False __snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , ) __snake_case = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ ) processor.save_pretrained(snake_case__ ) __snake_case = WavaVecaForCTC(snake_case__ ) else: __snake_case = WavaVecaForPreTraining(snake_case__ ) if is_finetuned or is_seq_class: __snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: __snake_case = argparse.Namespace(task='audio_pretraining' ) __snake_case = fairseq.tasks.setup_task(snake_case__ ) __snake_case , __snake_case , __snake_case = 
fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case__ ) __snake_case = model[0].eval() recursively_load_weights(snake_case__ , snake_case__ , not is_finetuned ) hf_wavavec.save_pretrained(snake_case__ ) if __name__ == "__main__": UpperCAmelCase__ : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) UpperCAmelCase__ : Tuple = parser.parse_args() UpperCAmelCase__ : Dict = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
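# Illustrative follow-up, assuming a fine-tuned checkpoint was converted into
# `dump_dir`; these are standard Transformers calls (WavaVeca* being this
# file's aliases for the public Wav2Vec2* classes), not part of the script.
from transformers import WavaVecaForCTC, WavaVecaProcessor

dump_dir = "path/to/pytorch_dump_folder"  # placeholder
processor = WavaVecaProcessor.from_pretrained(dump_dir)
model = WavaVecaForCTC.from_pretrained(dump_dir)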
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]: super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , ) def _a ( self , lowercase_ = "auto") -> Union[str, Any]: if slice_size == "auto": __snake_case = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_) def _a ( self) -> Any: self.enable_attention_slicing(lowercase_) @torch.no_grad() def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]: __snake_case = self.speech_processor.feature_extractor( lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device) __snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0) __snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[ 0 ] if isinstance(lowercase_ , lowercase_): __snake_case = 1 elif isinstance(lowercase_ , lowercase_): __snake_case = len(lowercase_) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(lowercase_)}.") # get prompt text embeddings __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) __snake_case = text_inputs.input_ids if text_input_ids.shape[-1] > 
self.tokenizer.model_max_length: __snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F" {self.tokenizer.model_max_length} tokens: {removed_text}") __snake_case = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case = text_embeddings.shape __snake_case = text_embeddings.repeat(1 , lowercase_ , 1) __snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __snake_case = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case = 42 if negative_prompt is None: __snake_case = [''] * batch_size elif type(lowercase_) is not type(lowercase_): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !=" F" {type(lowercase_)}.") elif isinstance(lowercase_ , lowercase_): __snake_case = [negative_prompt] elif batch_size != len(lowercase_): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.') else: __snake_case = negative_prompt __snake_case = text_input_ids.shape[-1] __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , ) __snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case = uncond_embeddings.shape[1] __snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1) __snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to( self.device) else: __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") __snake_case = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(lowercase_) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler __snake_case = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) __snake_case = {} if accepts_eta: __snake_case = eta for i, t in enumerate(self.progress_bar(lowercase_)): # expand the latents if we are doing classifier free guidance __snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_) # predict the noise residual __snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case = noise_pred.chunk(2) __snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_) __snake_case = 1 / 0.1_8215 * latents __snake_case = self.vae.decode(lowercase_).sample __snake_case = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __snake_case = self.numpy_to_pil(lowercase_) if not return_dict: return image return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
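# Assembly sketch (assumed checkpoint ids; the keyword names follow the
# register_modules call above, and `SpeechToImagePipeline` stands in for the
# pipeline class defined in this file):
#
#   from transformers import WhisperForConditionalGeneration, WhisperProcessor
#   from diffusers import StableDiffusionPipeline
#
#   speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#   speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#   sd = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#
#   pipe = SpeechToImagePipeline(
#       speech_model=speech_model, speech_processor=speech_processor,
#       vae=sd.vae, text_encoder=sd.text_encoder, tokenizer=sd.tokenizer,
#       unet=sd.unet, scheduler=sd.scheduler, safety_checker=sd.safety_checker,
#       feature_extractor=sd.feature_extractor,
#   )
#   image = pipe(raw_audio_array, sampling_rate=16_000).images[0]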
from ....configuration_utils import PretrainedConfig from ....utils import logging UpperCAmelCase__ : Dict = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = { "Visual-Attention-Network/van-base": ( "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json" ), } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''van''' def __init__( self , lowercase_=2_2_4 , lowercase_=3 , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[6_4, 1_2_8, 3_2_0, 5_1_2] , lowercase_=[3, 3, 1_2, 3] , lowercase_=[8, 8, 4, 4] , lowercase_="gelu" , lowercase_=0.02 , lowercase_=1e-6 , lowercase_=1e-2 , lowercase_=0.0 , lowercase_=0.0 , **lowercase_ , ) -> Union[str, Any]: super().__init__(**lowercase_) __snake_case = image_size __snake_case = num_channels __snake_case = patch_sizes __snake_case = strides __snake_case = hidden_sizes __snake_case = depths __snake_case = mlp_ratios __snake_case = hidden_act __snake_case = initializer_range __snake_case = layer_norm_eps __snake_case = layer_scale_init_value __snake_case = drop_path_rate __snake_case = dropout_rate
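# Illustrative only: this file mirrors transformers' VanConfig; a config built
# with overrides feeds the matching model class directly.
from transformers import VanConfig, VanModel

config = VanConfig(depths=[2, 2, 4, 2])  # arbitrary example override
model = VanModel(config)
print(config.model_type)  # "van"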
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple: super().__init__(*lowercase_ , **lowercase_) __snake_case = eval_examples __snake_case = post_process_function def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]: __snake_case = gen_kwargs.copy() __snake_case = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) __snake_case = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) __snake_case = gen_kwargs __snake_case = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case = self.get_eval_dataloader(lowercase_) __snake_case = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_) __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) else: __snake_case = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowercase_) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) __snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_) return metrics def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]: __snake_case = gen_kwargs.copy() __snake_case = self.get_test_dataloader(lowercase_) # Temporarily disable metric computation, we will do it in the loop here. 
__snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict') __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
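# Usage sketch (every name below is a placeholder for a real training setup;
# `EvalSeq2SeqTrainer` stands in for the subclass defined above):
#
#   trainer = EvalSeq2SeqTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate(max_length=64, num_beams=4)  # forwarded as gen_kwargs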
def abbr(a: str, b: str) -> bool:
    """
    Return True if string `a` can be turned into string `b` by capitalizing
    some of its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
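# Quick worked example for `abbr` above: "daBcd" -> "ABC" works by deleting
# the lowercase 'd's and capitalizing 'a' and 'c' ('B' already matches);
# "dBcd" cannot produce the leading 'A'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False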
from __future__ import annotations UpperCAmelCase__ : Dict = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]: '''simple docstring''' __snake_case = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the reference grid __snake_case = 1 __snake_case = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the action grid __snake_case = init[0] __snake_case = init[1] __snake_case = 0 __snake_case = g + heuristic[x][y] # cost from starting cell to destination cell __snake_case = [[f, g, x, y]] __snake_case = False # flag that is set when search is complete __snake_case = False # flag set if we can't find expand while not found and not resign: if len(snake_case__ ) == 0: raise ValueError('Algorithm is unable to find solution' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() __snake_case = cell.pop() __snake_case = next_cell[2] __snake_case = next_cell[3] __snake_case = next_cell[1] if x == goal[0] and y == goal[1]: __snake_case = True else: for i in range(len(snake_case__ ) ): # to try out different valid actions __snake_case = x + DIRECTIONS[i][0] __snake_case = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: __snake_case = g + cost __snake_case = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) __snake_case = 1 __snake_case = i __snake_case = [] __snake_case = goal[0] __snake_case = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: __snake_case = x - DIRECTIONS[action[x][y]][0] __snake_case = y - DIRECTIONS[action[x][y]][1] __snake_case = xa __snake_case = ya invpath.append([x, y] ) __snake_case = [] for i in range(len(snake_case__ ) ): path.append(invpath[len(snake_case__ ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCAmelCase__ : str = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCAmelCase__ : int = [0, 0] # all coordinates are given in format [y,x] UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1] UpperCAmelCase__ : Optional[Any] = 1 # the cost map which pushes the path closer to the goal UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCAmelCase__ : Optional[int] = 99 UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase__ : List[Any] = logging.get_logger(__name__) UpperCAmelCase__ : Tuple = { "nvidia/segformer-b0-finetuned-ade-512-512": ( "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''segformer''' def __init__( self , lowercase_=3 , lowercase_=4 , lowercase_=[2, 2, 2, 2] , lowercase_=[8, 4, 2, 1] , lowercase_=[3_2, 6_4, 1_6_0, 2_5_6] , lowercase_=[7, 3, 3, 3] , lowercase_=[4, 2, 2, 2] , lowercase_=[1, 2, 5, 8] , lowercase_=[4, 4, 4, 4] , lowercase_="gelu" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=0.02 , lowercase_=0.1 , lowercase_=1e-6 , lowercase_=2_5_6 , lowercase_=2_5_5 , **lowercase_ , ) -> int: super().__init__(**lowercase_) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , lowercase_ , ) __snake_case = num_channels __snake_case = num_encoder_blocks __snake_case = depths __snake_case = sr_ratios __snake_case = hidden_sizes __snake_case = patch_sizes __snake_case = strides __snake_case = mlp_ratios __snake_case = num_attention_heads __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = classifier_dropout_prob __snake_case = initializer_range __snake_case = drop_path_rate __snake_case = layer_norm_eps __snake_case = decoder_hidden_size __snake_case = kwargs.get('reshape_last_stage' , lowercase_) __snake_case = semantic_loss_ignore_index class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = version.parse('''1.11''' ) @property def _a ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def _a ( self) -> float: return 1e-4 @property def _a ( self) -> int: return 1_2
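# Illustrative only (public names assumed: SegformerConfig/SegformerOnnxConfig
# in transformers): the ONNX config above exposes the export metadata.
from transformers import SegformerConfig
from transformers.models.segformer.configuration_segformer import SegformerOnnxConfig

onnx_config = SegformerOnnxConfig(SegformerConfig())
print(onnx_config.inputs)               # pixel_values -> (batch, num_channels, height, width)
print(onnx_config.atol_for_validation)  # 1e-4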
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase__ : Any = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class __lowercase ( unittest.TestCase ): def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict: __snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))] if identifier is not None: __snake_case = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_): for n_ in n_identifier: __snake_case = [file for file in files if n_ not in file] else: __snake_case = [file for file in files if n_identifier not in file] __snake_case = ignore_files or [] ignore_files.append('__init__.py') __snake_case = [file for file in files if file not in ignore_files] for file in files: # Open all files print('Testing' , lowercase_) if only_modules: __snake_case = file.split('.')[0] try: __snake_case = getattr(lowercase_ , lowercase_) __snake_case = doctest.DocTestSuite(lowercase_) __snake_case = unittest.TextTestRunner().run(lowercase_) self.assertIs(len(result.failures) , 0) except AttributeError: logger.info(F"{module_identifier} is not a module.") else: __snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS) self.assertIs(result.failed , 0) def _a ( self) -> str: __snake_case = Path('src/transformers') __snake_case = 'modeling' __snake_case = [ 'modeling_ctrl.py', 'modeling_tf_ctrl.py', ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_) def _a ( self) -> Optional[Any]: __snake_case = Path('src/transformers') __snake_case = 'tokenization' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> List[str]: __snake_case = Path('src/transformers') __snake_case = 'configuration' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('src/transformers') __snake_case = ['configuration', 'modeling', 'tokenization'] self.analyze_directory(lowercase_ , n_identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('docs/source') __snake_case = ['favicon.ico'] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __lowercase ( unittest.TestCase ): @property def _a ( self) -> Optional[int]: torch.manual_seed(0) __snake_case = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def _a ( self) -> Optional[int]: __snake_case = self.dummy_uncond_unet __snake_case = ScoreSdeVeScheduler() __snake_case = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_) sde_ve.to(lowercase_) sde_ve.set_progress_bar_config(disable=lowercase_) __snake_case = torch.manual_seed(0) __snake_case = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=lowercase_).images __snake_case = torch.manual_seed(0) __snake_case = sde_ve(num_inference_steps=2 , output_type='numpy' , generator=lowercase_ , return_dict=lowercase_)[ 0 ] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) __snake_case = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch class __lowercase ( unittest.TestCase ): def _a ( self) -> Tuple: __snake_case = 'google/ncsnpp-church-256' __snake_case = UNetaDModel.from_pretrained(lowercase_) __snake_case = ScoreSdeVeScheduler.from_pretrained(lowercase_) __snake_case = ScoreSdeVePipeline(unet=lowercase_ , scheduler=lowercase_) sde_ve.to(lowercase_) sde_ve.set_progress_bar_config(disable=lowercase_) __snake_case = torch.manual_seed(0) __snake_case = sde_ve(num_inference_steps=1_0 , output_type='numpy' , generator=lowercase_).images __snake_case = image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) __snake_case = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
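# Stand-alone version of what the slow test above exercises (checkpoint id
# taken from that test; step count reduced for illustration):
import torch
from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
image = pipe(num_inference_steps=10, generator=torch.manual_seed(0)).images[0]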
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count the ordered ways to reach `target` by summing items of `array`
    (items may repeat), via plain recursion.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """
    Same count, computed top-down with memoization on a dp array.
    """

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """
    Same count, computed bottom-up.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
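# Cross-check (illustrative): the three implementations above agree on the
# __main__ example; there are nine ordered ways to compose 5 from {1, 2, 5}.
assert (
    combination_sum_iv(3, [1, 2, 5], 5)
    == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    == 9
)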
import math
import sys


def read_file_binary(file_path: str) -> str:
    """
    Read the given file as bytes and return them as one long string of bits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompress `data_bits` using the Lempel-Ziv-Welch algorithm and return the
    result as a string of bits.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Pack the bit string into bytes (with a stop marker) and write it to file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Strip the size prefix that the matching compressor writes before the data.
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Read the source file, decompress its LZW payload and write the result to
    the destination file.
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> List[str]: __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]}) return dset def _a ( self) -> Optional[int]: import faiss __snake_case = self._create_dummy_dataset() __snake_case = dset.map( lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_) __snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') dset.drop_index('vecs') def _a ( self) -> str: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> int: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name) dset.load_faiss_index('vecs2' , tmp_file.name) os.unlink(tmp_file.name) __snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> List[Any]: __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs') dset.drop_index('vecs') self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa))) def _a ( self) -> Any: from elasticsearch import Elasticsearch __snake_case = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 3_0) __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}} __snake_case = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=lowercase_) __snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29') self.assertEqual(examples['filename'][0] , 'my_name-train_29') @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[int]: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal , 5) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa)) self.assertEqual(index.faiss_index.ntotal , 1_0) # single query __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1)) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) # batched queries __snake_case = np.eye(5 , dtype=np.floataa)[::-1] __snake_case , __snake_case = index.search_batch(lowercase_) self.assertRaises(lowercase_ , index.search_batch , queries[0]) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([4, 3, 2, 1, 0] , lowercase_) def _a ( self) -> str: import faiss __snake_case = FaissIndex(string_factory='Flat') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) __snake_case = FaissIndex(string_factory='LSH') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexLSH) with self.assertRaises(lowercase_): __snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5)) def _a ( self) -> Optional[int]: import faiss __snake_case = faiss.IndexFlat(5) __snake_case = FaissIndex(custom_index=lowercase_) index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) def _a ( self) -> Tuple: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: index.save(tmp_file.name) __snake_case = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) @require_faiss def A ( snake_case__ : List[str] ) -> List[Any]: '''simple docstring''' import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __snake_case = 'index.faiss' __snake_case = f"mock://{index_name}" index.save(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = np.zeros(5 , dtype=np.floataa ) __snake_case = 1 __snake_case , __snake_case = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[Any]: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = Elasticsearch() __snake_case = {'acknowledged': True} __snake_case = ElasticSearchIndex(es_client=lowercase_) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(['foo', 'bar', 'foobar']) # single query __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # single query with timeout __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # batched queries __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_) # batched queries with timeout __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_)
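# Minimal stand-alone use of the FaissIndex wrapper these tests exercise
# (five one-hot vectors, then a nearest-neighbour query):
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex()
index.add_vectors(np.eye(5, dtype=np.float32))
scores, ids = index.search(np.ones(5, dtype=np.float32))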
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change of equation() on [a, b] guarantees a root.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]: '''simple docstring''' __snake_case = args.log_outputs __snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric __snake_case = load_metric('wer' ) __snake_case = load_metric('cer' ) # compute metrics __snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] ) __snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results __snake_case = f"WER: {wer_result}\nCER: {cer_result}" print(snake_case__ ) with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f: f.write(snake_case__ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __snake_case = f"log_{dataset_id}_predictions.txt" __snake_case = f"log_{dataset_id}_targets.txt" with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t: # mapping function to write output def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ): p.write(f"{i}" + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f"{i}" + '\n' ) t.write(batch['target'] + '\n' ) result.map(snake_case__ , with_indices=snake_case__ ) def A ( snake_case__ : str ) -> str: '''simple docstring''' __snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __snake_case = re.sub(snake_case__ , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! __snake_case = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: __snake_case = ' '.join(text.split(snake_case__ ) ) return text def A ( snake_case__ : int ) -> Optional[int]: '''simple docstring''' # load dataset __snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __snake_case = AutoFeatureExtractor.from_pretrained(args.model_id ) __snake_case = feature_extractor.sampling_rate # resample audio __snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) ) # load eval pipeline if args.device is None: __snake_case = 0 if torch.cuda.is_available() else -1 __snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case__ : Optional[Any] ): __snake_case = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __snake_case = prediction['text'] __snake_case = normalize_text(batch['sentence'] ) return batch # run inference on all examples __snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case__ , snake_case__ ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) UpperCAmelCase__ : str = parser.parse_args() main(args)
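# Example invocation (model and dataset ids are illustrative only):
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en \
#       --split test --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs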
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ : Tuple = { "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"], "processing_layoutlmv2": ["LayoutLMv2Processor"], "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = ["LayoutLMv2TokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["LayoutLMv2FeatureExtractor"] UpperCAmelCase__ : Union[str, Any] = ["LayoutLMv2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : List[Any] = [ "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMv2ForQuestionAnswering", "LayoutLMv2ForSequenceClassification", "LayoutLMv2ForTokenClassification", "LayoutLMv2Layer", "LayoutLMv2Model", "LayoutLMv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys UpperCAmelCase__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def A ( *snake_case__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' with open(snake_case__ , 'r' ) as fh: fcntl.flock(snake_case__ , fcntl.LOCK_EX ) try: print(*snake_case__ ) finally: fcntl.flock(snake_case__ , fcntl.LOCK_UN ) UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) UpperCAmelCase__ : Any = torch.device("cuda", local_rank) UpperCAmelCase__ : Union[str, Any] = socket.gethostname() UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank UpperCAmelCase__ : Optional[int] = dist.get_rank() UpperCAmelCase__ : List[str] = dist.get_world_size() printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(F"""{gpu} is broken""") raise
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class __lowercase ( unittest.TestCase ): def _a ( self) -> None: __snake_case = Vector([1, 2, 3]) self.assertEqual(x.component(0) , 1) self.assertEqual(x.component(2) , 3) __snake_case = Vector() def _a ( self) -> None: __snake_case = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(lowercase_) , '(0,0,0,0,0,1)') def _a ( self) -> None: __snake_case = Vector([1, 2, 3, 4]) self.assertEqual(len(lowercase_) , 4) def _a ( self) -> None: __snake_case = Vector([1, 2]) __snake_case = Vector([1, 2, 3, 4, 5]) __snake_case = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) __snake_case = Vector([1, -1, 1, -1, 2, -3, 4, -5]) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3) self.assertEqual(z.euclidean_length() , 0) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3) def _a ( self) -> None: __snake_case = Vector([1, 2, 3]) __snake_case = Vector([1, 1, 1]) self.assertEqual((x + y).component(0) , 2) self.assertEqual((x + y).component(1) , 3) self.assertEqual((x + y).component(2) , 4) def _a ( self) -> None: __snake_case = Vector([1, 2, 3]) __snake_case = Vector([1, 1, 1]) self.assertEqual((x - y).component(0) , 0) self.assertEqual((x - y).component(1) , 1) self.assertEqual((x - y).component(2) , 2) def _a ( self) -> None: __snake_case = Vector([1, 2, 3]) __snake_case = Vector([2, -1, 4]) # for test of dot product __snake_case = Vector([1, -2, -1]) self.assertEqual(str(x * 3.0) , '(3.0,6.0,9.0)') self.assertEqual((a * b) , 0) def _a ( self) -> None: self.assertEqual(str(zero_vector(1_0)).count('0') , 1_0) def _a ( self) -> None: self.assertEqual(str(unit_basis_vector(3 , 1)) , '(0,1,0)') def _a ( self) -> None: __snake_case = Vector([1, 2, 3]) __snake_case = Vector([1, 0, 1]) self.assertEqual(str(axpy(2 , lowercase_ , lowercase_)) , '(3,4,7)') def _a ( self) -> None: __snake_case = Vector([1, 0, 0, 0, 0, 0]) __snake_case = x.copy() self.assertEqual(str(lowercase_) , str(lowercase_)) def _a ( self) -> None: __snake_case = Vector([1, 0, 0]) x.change_component(0 , 0) x.change_component(1 , 1) self.assertEqual(str(lowercase_) , '(0,1,0)') def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(lowercase_)) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __snake_case = [[-3, -1_4, -1_0], [-5, -1_0, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(minors[x][y] , a.minor(lowercase_ , lowercase_)) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __snake_case = [[-3, 1_4, -1_0], [5, -1_0, 5], [-2, 1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(cofactors[x][y] , a.cofactor(lowercase_ , lowercase_)) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(-5 , a.determinant()) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3) __snake_case = Vector([1, 2, 3]) self.assertEqual('(14,32,50)' , str(a * x)) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2)) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) a.change_component(0 , 2 , 5) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(lowercase_)) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] 
, 3 , 3) self.assertEqual(7 , a.component(2 , 1) , 0.01) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b)) def _a ( self) -> None: __snake_case = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) __snake_case = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 1_0]] , 3 , 3) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b)) def _a ( self) -> None: self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5)) , ) if __name__ == "__main__": unittest.main()
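# Hedged usage sketch (not part of the test file): assuming the `lib` module
# exercised above exposes Vector and Matrix with the interface the tests rely on,
# a quick interactive check looks like this.
from lib import Matrix, Vector  # import path assumed (the tests use a relative import)

v = Vector([1, 2, 3])
print(v.euclidean_length())  # ~3.742 (sqrt(14))
m = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
print(m.determinant())       # -5, matching the assertion above
print(str(m * v))            # matrix-vector product, printed as "(14,25,44)"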
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Download a video via the downloadgram API and return its raw bytes."""
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : Any = { "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json", "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json", "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json", "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json", "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json", "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json", "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json", "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json", "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json", "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json", } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''rwkv''' __UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''} def __init__( self , lowercase_=5_0_2_7_7 , lowercase_=1_0_2_4 , lowercase_=4_0_9_6 , lowercase_=3_2 , lowercase_=None , lowercase_=None , lowercase_=1e-5 , lowercase_=0 , lowercase_=0 , lowercase_=6 , lowercase_=False , lowercase_=True , **lowercase_ , ) -> Optional[Any]: __snake_case = vocab_size __snake_case = context_length __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = attention_hidden_size if attention_hidden_size is not None else hidden_size __snake_case = intermediate_size if intermediate_size is not None else 4 * hidden_size __snake_case = layer_norm_epsilon __snake_case = rescale_every __snake_case = use_cache __snake_case = bos_token_id __snake_case = eos_token_id super().__init__( tie_word_embeddings=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_)
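# Hedged usage sketch (not part of the original row): the class above is the RWKV
# model config; assuming the real transformers export (the dump erased the class
# and keyword names), the derived defaults can be inspected like this.
from transformers import RwkvConfig  # import path assumed from the library

config = RwkvConfig(vocab_size=50277, context_length=2048, hidden_size=768)
print(config.attention_hidden_size)  # defaults to hidden_size -> 768
print(config.intermediate_size)      # defaults to 4 * hidden_size -> 3072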
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __lowercase : def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]: __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def _a ( self) -> Union[str, Any]: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length]) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __snake_case = ids_tensor([self.batch_size] , self.num_choices) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , ) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]: __snake_case = OpenLlamaModel(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_) __snake_case = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , 
lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]: __snake_case = True __snake_case = OpenLlamaModel(lowercase_) model.to(lowercase_) model.eval() __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) __snake_case = model(lowercase_ , attention_mask=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str: __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]: __snake_case = True __snake_case = True __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() # first forward pass __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) __snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size) __snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1) __snake_case = torch.cat([input_mask, next_mask] , dim=-1) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3)) def _a ( self) -> Optional[Any]: __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else () __UpperCAmelCase = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if 
is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def _a ( self) -> Tuple: __snake_case = OpenLlamaModelTester(self) __snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7) def _a ( self) -> int: self.config_tester.run_common_tests() def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'single_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> int: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'multi_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def _a ( self) -> List[Any]: pass @parameterized.expand([('linear',), ('dynamic',)]) def _a ( self , lowercase_) -> Optional[Any]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = ids_tensor([1, 1_0] , config.vocab_size) __snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = OpenLlamaModel(lowercase_) original_model.to(lowercase_) original_model.eval() __snake_case = original_model(lowercase_).last_hidden_state __snake_case = original_model(lowercase_).last_hidden_state set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = {'type': scaling_type, 'factor': 10.0} __snake_case = OpenLlamaModel(lowercase_) scaled_model.to(lowercase_) scaled_model.eval() 
__snake_case = scaled_model(lowercase_).last_hidden_state __snake_case = scaled_model(lowercase_).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
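# Hedged sketch (illustrative, not the model code): the "linear" rope_scaling
# exercised above conceptually divides position indices by the factor before the
# rotary angles are computed, so a 10x factor stretches the usable context 10x.
import torch


def rotary_angles(positions: torch.Tensor, dim: int = 64, base: float = 10000.0, factor: float = 1.0) -> torch.Tensor:
    # standard RoPE inverse frequencies
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    # "linear" scaling: positions are compressed by the factor
    return torch.outer(positions.float() / factor, inv_freq)


angles = rotary_angles(torch.arange(16), factor=10.0)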
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ChineseCLIPImageProcessor instead.',
            FutureWarning,  # warning category restored; the dump had obfuscated it
        )
        super().__init__(*args, **kwargs)
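# Hedged migration note: the deprecated shim above only forwards to
# ChineseCLIPImageProcessor, so new code should construct the image processor
# directly (import path assumed from the transformers library).
from transformers import ChineseCLIPImageProcessor

image_processor = ChineseCLIPImageProcessor()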
def is_automorphic_number(number: int) -> bool:  # name reconstructed; the dump erased the original identifier
    """Return True if ``number`` is automorphic (its square ends in the number itself)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
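# Hedged usage sketch: the digit-by-digit comparison above is the classic test
# for an automorphic number (one whose square ends in the number itself).
for n in (5, 6, 25, 76, 7):
    print(n, is_automorphic_number(n))  # 5, 6, 25, 76 -> True; 7 -> False (7**2 = 49)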
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ''
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    """Read a ciphertext from stdin and brute-force it."""
    message = input('Encrypted message: ')
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
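# Hedged usage sketch: running the brute-force decrypter on an uppercase Caesar
# ciphertext prints one candidate plaintext per key; "GRFG" is "TEST" shifted by 13.
decrypt("GRFG")  # the Key #13 line reads: Decryption using Key #13: TEST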
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Logistic sigmoid of the input array."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:  # name reconstructed (SiLU/swish); erased in the dump
    """Sigmoid Linear Unit (SiLU / swish): x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
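# Hedged numeric check: sigmoid(0) = 0.5, so the sigmoid-weighted product above
# (SiLU/swish) is 0 at the origin and approaches the identity for large inputs.
x = np.array([-1.0, 0.0, 1.0])
print(sigmoid(x))              # ~[0.269, 0.5, 0.731]
print(sigmoid_linear_unit(x))  # ~[-0.269, 0.0, 0.731]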
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase__ : Tuple = { "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"], "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = ["VisionTextDualEncoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[Any] = ["FlaxVisionTextDualEncoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : List[Any] = ["TFVisionTextDualEncoderModel"] if TYPE_CHECKING: from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel else: import sys UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
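# Hedged usage sketch: sweeping prime exponents enumerates Mersenne primes 2**p - 1.
for p in (3, 5, 7, 11, 13):
    print(p, lucas_lehmer_test(p))  # True for 3, 5, 7, 13; False for 11 (2047 = 23 * 89)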
# Lint as: python3
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"  # name reconstructed; unused in this snippet


def camelcase_to_snakecase(name):
    """Convert a CamelCase dataset name to snake_case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake_case dataset name to CamelCase."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(path, dataset_name, split, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
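# Hedged usage sketch: shard filenames for a dataset split are derived like this.
print(camelcase_to_snakecase("SomeDatasetName"))  # some_dataset_name
print(filenames_for_dataset_split("/data", "my_dataset", "train",
                                  filetype_suffix="arrow", shard_lengths=[10, 10]))
# ['/data/my_dataset-train-00000-of-00002.arrow', '/data/my_dataset-train-00001-of-00002.arrow']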
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ : Optional[Any] = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Optional[int] = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Union[str, Any] = ["CLIPFeatureExtractor"] UpperCAmelCase__ : Optional[int] = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Any = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : int = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys UpperCAmelCase__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL UpperCAmelCase__ : Dict = logging.get_logger(__name__) class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = ['''pixel_values'''] def __init__( self , lowercase_ = True , lowercase_ = None , lowercase_ = None , lowercase_ = PILImageResampling.BILINEAR , lowercase_ = True , lowercase_ = 1 / 2_5_5 , lowercase_ = True , lowercase_ = None , lowercase_ = None , **lowercase_ , ) -> None: super().__init__(**lowercase_) __snake_case = size if size is not None else {'shortest_edge': 3_8_4} __snake_case = get_size_dict(lowercase_ , default_to_square=lowercase_) __snake_case = do_resize __snake_case = size # Default value set here for backwards compatibility where the value in config is None __snake_case = crop_pct if crop_pct is not None else 2_2_4 / 2_5_6 __snake_case = resample __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __snake_case = image_std if image_std is not None else IMAGENET_STANDARD_STD def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = PILImageResampling.BICUBIC , lowercase_ = None , **lowercase_ , ) -> np.ndarray: __snake_case = get_size_dict(lowercase_ , default_to_square=lowercase_) if "shortest_edge" not in size: raise ValueError(F"Size dictionary must contain 'shortest_edge' key. 
Got {size.keys()}") __snake_case = size['shortest_edge'] if shortest_edge < 3_8_4: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __snake_case = int(shortest_edge / crop_pct) __snake_case = get_resize_output_image_size(lowercase_ , size=lowercase_ , default_to_square=lowercase_) __snake_case = resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_) # then crop to (shortest_edge, shortest_edge) return center_crop(image=lowercase_ , size=(shortest_edge, shortest_edge) , data_format=lowercase_ , **lowercase_) else: # warping (no cropping) when evaluated at 384 or larger return resize( lowercase_ , size=(shortest_edge, shortest_edge) , resample=lowercase_ , data_format=lowercase_ , **lowercase_) def _a ( self , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> Union[str, Any]: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = None , **lowercase_ , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_) def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = ChannelDimension.FIRST , **lowercase_ , ) -> PIL.Image.Image: __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = crop_pct if crop_pct is not None else self.crop_pct __snake_case = resample if resample is not None else self.resample __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = size if size is not None else self.size __snake_case = get_size_dict(lowercase_ , default_to_square=lowercase_) __snake_case = make_list_of_images(lowercase_) if not valid_images(lowercase_): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.') if do_resize and size["shortest_edge"] < 3_8_4 and crop_pct is None: raise ValueError('crop_pct must be specified if size < 384.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. 
__snake_case = [to_numpy_array(lowercase_) for image in images] if do_resize: __snake_case = [self.resize(image=lowercase_ , size=lowercase_ , crop_pct=lowercase_ , resample=lowercase_) for image in images] if do_rescale: __snake_case = [self.rescale(image=lowercase_ , scale=lowercase_) for image in images] if do_normalize: __snake_case = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_) for image in images] __snake_case = [to_channel_dimension_format(lowercase_ , lowercase_) for image in images] __snake_case = {'pixel_values': images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_)
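# Hedged usage sketch (class and method names assumed from the transformers
# ConvNeXT-style processor this row mirrors; the dump erased them): preprocessing
# one PIL image yields a batched, channels-first pixel_values array.
import numpy as np
from PIL import Image
from transformers import ConvNextImageProcessor  # hypothetical binding of the class above

image = Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))
processor = ConvNextImageProcessor(size={"shortest_edge": 384})
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 384, 384)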
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging UpperCAmelCase__ : List[Any] = ( "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py" ) UpperCAmelCase__ : str = logging.get_logger(__name__) # pylint: disable=invalid-name def A ( ) -> List[str]: '''simple docstring''' __snake_case = 'https://pypi.org/pypi/diffusers/json' __snake_case = json.loads(request.urlopen(snake_case__ ).read() )['releases'].keys() return sorted(snake_case__ , key=lambda snake_case__ : version.Version(snake_case__ ) ) def A ( ) -> Optional[int]: '''simple docstring''' # This function has already been executed if HF_MODULES_CACHE already is in the Python path. if HF_MODULES_CACHE in sys.path: return sys.path.append(snake_case__ ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) __snake_case = Path(snake_case__ ) / '__init__.py' if not init_path.exists(): init_path.touch() def A ( snake_case__ : Union[str, os.PathLike] ) -> Optional[int]: '''simple docstring''' init_hf_modules() __snake_case = Path(snake_case__ ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) __snake_case = dynamic_module_path / '__init__.py' if not init_path.exists(): init_path.touch() def A ( snake_case__ : Any ) -> str: '''simple docstring''' with open(snake_case__ , 'r' , encoding='utf-8' ) as f: __snake_case = f.read() # Imports of the form `import .xxx` __snake_case = re.findall('^\s*import\s+\.(\S+)\s*$' , snake_case__ , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , snake_case__ , flags=re.MULTILINE ) # Unique-ify return list(set(snake_case__ ) ) def A ( snake_case__ : int ) -> Tuple: '''simple docstring''' __snake_case = False __snake_case = [module_file] __snake_case = [] # Let's recurse through all relative imports while not no_change: __snake_case = [] for f in files_to_check: new_imports.extend(get_relative_imports(snake_case__ ) ) __snake_case = Path(snake_case__ ).parent __snake_case = [str(module_path / m ) for m in new_imports] __snake_case = [f for f in new_import_files if f not in all_relative_imports] __snake_case = [f"{f}.py" for f in new_import_files] __snake_case = len(snake_case__ ) == 0 all_relative_imports.extend(snake_case__ ) return all_relative_imports def A ( snake_case__ : Any ) -> Union[str, Any]: '''simple docstring''' with open(snake_case__ , 'r' , encoding='utf-8' ) as f: __snake_case = f.read() # Imports of the form `import xxx` __snake_case = re.findall('^\s*import\s+(\S+)\s*$' , snake_case__ , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('^\s*from\s+(\S+)\s+import' , snake_case__ , flags=re.MULTILINE ) # Only keep the top-level module __snake_case = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' 
)] # Unique-ify and test we got them all __snake_case = list(set(snake_case__ ) ) __snake_case = [] for imp in imports: try: importlib.import_module(snake_case__ ) except ImportError: missing_packages.append(snake_case__ ) if len(snake_case__ ) > 0: raise ImportError( 'This modeling file requires the following packages that were not found in your environment: ' f"{', '.join(snake_case__ )}. Run `pip install {' '.join(snake_case__ )}`" ) return get_relative_imports(snake_case__ ) def A ( snake_case__ : List[str] , snake_case__ : str ) -> List[str]: '''simple docstring''' __snake_case = module_path.replace(os.path.sep , '.' ) __snake_case = importlib.import_module(snake_case__ ) if class_name is None: return find_pipeline_class(snake_case__ ) return getattr(snake_case__ , snake_case__ ) def A ( snake_case__ : str ) -> str: '''simple docstring''' from ..pipelines import DiffusionPipeline __snake_case = dict(inspect.getmembers(snake_case__ , inspect.isclass ) ) __snake_case = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , snake_case__ ) and cls.__module__.split('.' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:" f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in" f" {loaded_module}." ) __snake_case = cls return pipeline_class def A ( snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , ) -> str: '''simple docstring''' __snake_case = str(snake_case__ ) __snake_case = os.path.join(snake_case__ , snake_case__ ) if os.path.isfile(snake_case__ ): __snake_case = module_file_or_url __snake_case = 'local' elif pretrained_model_name_or_path.count('/' ) == 0: __snake_case = get_diffusers_versions() # cut ".dev0" __snake_case = 'v' + '.'.join(__version__.split('.' )[:3] ) # retrieve github version that matches if revision is None: __snake_case = latest_version if latest_version[1:] in available_versions else 'main' logger.info(f"Defaulting to latest_version: {revision}." ) elif revision in available_versions: __snake_case = f"v{revision}" elif revision == "main": __snake_case = revision else: raise ValueError( f"`custom_revision`: {revision} does not exist. Please make sure to choose one of" f" {', '.join(available_versions + ['main'] )}." ) # community pipeline on GitHub __snake_case = COMMUNITY_PIPELINES_URL.format(revision=snake_case__ , pipeline=snake_case__ ) try: __snake_case = cached_download( snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , ) __snake_case = 'git' __snake_case = pretrained_model_name_or_path + '.py' except EnvironmentError: logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." 
) raise else: try: # Load from URL or cache if already cached __snake_case = hf_hub_download( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , proxies=snake_case__ , resume_download=snake_case__ , local_files_only=snake_case__ , use_auth_token=snake_case__ , ) __snake_case = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) ) except EnvironmentError: logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}." ) raise # Check we have all the requirements in our environment __snake_case = check_imports(snake_case__ ) # Now we move the module inside our cached dynamic modules. __snake_case = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(snake_case__ ) __snake_case = Path(snake_case__ ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(snake_case__ , submodule_path / module_file ) for module_needed in modules_needed: __snake_case = f"{module_needed}.py" shutil.copy(os.path.join(snake_case__ , snake_case__ ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(snake_case__ , snake_case__ ): __snake_case = use_auth_token elif use_auth_token is True: __snake_case = HfFolder.get_token() else: __snake_case = None __snake_case = model_info(snake_case__ , revision=snake_case__ , token=snake_case__ ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __snake_case = submodule_path / commit_hash __snake_case = full_submodule + os.path.sep + commit_hash create_dynamic_module(snake_case__ ) if not (submodule_path / module_file).exists(): shutil.copy(snake_case__ , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( snake_case__ , f"{module_needed}.py" , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) return os.path.join(snake_case__ , snake_case__ ) def A ( snake_case__ : Union[str, os.PathLike] , snake_case__ : str , snake_case__ : Optional[str] = None , snake_case__ : Optional[Union[str, os.PathLike]] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : Optional[Dict[str, str]] = None , snake_case__ : Optional[Union[bool, str]] = None , snake_case__ : Optional[str] = None , snake_case__ : bool = False , **snake_case__ : List[Any] , ) -> Dict: '''simple docstring''' __snake_case = get_cached_module_file( snake_case__ , snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , resume_download=snake_case__ , proxies=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , local_files_only=snake_case__ , ) return get_class_in_module(snake_case__ , final_module.replace('.py' , '' ) )
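# Hedged usage sketch: assuming the two helpers above keep their diffusers names
# (get_cached_module_file / get_class_from_dynamic_module, as the internal calls
# suggest), a community pipeline class can be resolved from the GitHub examples.
pipeline_cls = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion",      # community pipeline name (illustrative)
    "clip_guided_stable_diffusion.py",   # module file fetched and cached locally
)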
import argparse import requests import torch from PIL import Image from torchvision.transforms import Compose, Normalize, Resize, ToTensor from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor def A ( snake_case__ : List[Any] ) -> Any: '''simple docstring''' __snake_case = SwinaSRConfig() if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __snake_case = 4 elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: __snake_case = 4 __snake_case = 48 __snake_case = 'pixelshuffle_aux' elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __snake_case = [6, 6, 6, 6] __snake_case = 60 __snake_case = [6, 6, 6, 6] __snake_case = 'pixelshuffledirect' elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __snake_case = 4 __snake_case = 'nearest+conv' elif "Swin2SR_Jpeg_dynamic" in checkpoint_url: __snake_case = 1 __snake_case = 1 __snake_case = 126 __snake_case = 7 __snake_case = 255.0 __snake_case = '' return config def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] ) -> Union[str, Any]: '''simple docstring''' if "patch_embed.proj" in name and "layers" not in name: __snake_case = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: __snake_case = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' ) if "layers" in name: __snake_case = name.replace('layers' , 'encoder.stages' ) if "residual_group.blocks" in name: __snake_case = name.replace('residual_group.blocks' , 'layers' ) if "attn.proj" in name: __snake_case = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: __snake_case = name.replace('attn' , 'attention.self' ) if "norm1" in name: __snake_case = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: __snake_case = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: __snake_case = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: __snake_case = name.replace('mlp.fc2' , 'output.dense' ) if "q_bias" in name: __snake_case = name.replace('q_bias' , 'query.bias' ) if "k_bias" in name: __snake_case = name.replace('k_bias' , 'key.bias' ) if "v_bias" in name: __snake_case = name.replace('v_bias' , 'value.bias' ) if "cpb_mlp" in name: __snake_case = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' ) if "patch_embed.proj" in name: __snake_case = name.replace('patch_embed.proj' , 'patch_embed.projection' ) if name == "norm.weight": __snake_case = 'layernorm.weight' if name == "norm.bias": __snake_case = 'layernorm.bias' if "conv_first" in name: __snake_case = name.replace('conv_first' , 'first_convolution' ) if ( "upsample" in name or "conv_before_upsample" in name or "conv_bicubic" in name or "conv_up" in name or "conv_hr" in name or "conv_last" in name or "aux" in name ): # heads if "conv_last" in name: __snake_case = name.replace('conv_last' , 'final_convolution' ) if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]: if "conv_before_upsample.0" in name: __snake_case = name.replace('conv_before_upsample.0' , 'conv_before_upsample' ) if "upsample.0" in name: __snake_case = name.replace('upsample.0' , 'upsample.convolution_0' ) if "upsample.2" in name: __snake_case = name.replace('upsample.2' , 'upsample.convolution_1' ) __snake_case = 'upsample.' 
+ name elif config.upsampler == "pixelshuffledirect": __snake_case = name.replace('upsample.0.weight' , 'upsample.conv.weight' ) __snake_case = name.replace('upsample.0.bias' , 'upsample.conv.bias' ) else: pass else: __snake_case = 'swin2sr.' + name return name def A ( snake_case__ : str , snake_case__ : List[Any] ) -> Dict: '''simple docstring''' for key in orig_state_dict.copy().keys(): __snake_case = orig_state_dict.pop(snake_case__ ) if "qkv" in key: __snake_case = key.split('.' ) __snake_case = int(key_split[1] ) __snake_case = int(key_split[4] ) __snake_case = config.embed_dim if "weight" in key: __snake_case = val[:dim, :] __snake_case = val[dim : dim * 2, :] __snake_case = val[-dim:, :] else: __snake_case = val[:dim] __snake_case = val[dim : dim * 2] __snake_case = val[-dim:] pass else: __snake_case = val return orig_state_dict def A ( snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : int ) -> Tuple: '''simple docstring''' __snake_case = get_config(snake_case__ ) __snake_case = SwinaSRForImageSuperResolution(snake_case__ ) model.eval() __snake_case = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' ) __snake_case = convert_state_dict(snake_case__ , snake_case__ ) __snake_case , __snake_case = model.load_state_dict(snake_case__ , strict=snake_case__ ) if len(snake_case__ ) > 0: raise ValueError('Missing keys when converting: {}'.format(snake_case__ ) ) for key in unexpected_keys: if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key): raise ValueError(f"Unexpected key {key} in state_dict" ) # verify values __snake_case = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true' __snake_case = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) __snake_case = SwinaSRImageProcessor() # pixel_values = processor(image, return_tensors="pt").pixel_values __snake_case = 126 if 'Jpeg' in checkpoint_url else 256 __snake_case = Compose( [ Resize((image_size, image_size) ), ToTensor(), Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __snake_case = transforms(snake_case__ ).unsqueeze(0 ) if config.num_channels == 1: __snake_case = pixel_values[:, 0, :, :].unsqueeze(1 ) __snake_case = model(snake_case__ ) # assert values if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 512, 512] ) __snake_case = torch.tensor( [[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] ) elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] ) elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url: # TODO values didn't match exactly here __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] ) elif "Swin2SR_Lightweight_X2_64" in checkpoint_url: __snake_case = torch.Size([1, 3, 512, 512] ) __snake_case = torch.tensor( [[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] ) elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url: __snake_case = torch.Size([1, 3, 1024, 1024] ) __snake_case = torch.tensor( [[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] ) assert ( outputs.reconstruction.shape == 
expected_shape ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}" assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case__ , atol=1e-3 ) print('Looks ok!' ) __snake_case = { 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': ( 'swin2SR-classical-sr-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': ( 'swin2SR-classical-sr-x4-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': ( 'swin2SR-compressed-sr-x4-48' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': ( 'swin2SR-lightweight-x2-64' ), 'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': ( 'swin2SR-realworld-sr-x4-64-bsrgan-psnr' ), } __snake_case = url_to_name[checkpoint_url] if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(snake_case__ ) if push_to_hub: model.push_to_hub(f"caidas/{model_name}" ) processor.push_to_hub(f"caidas/{model_name}" ) if __name__ == "__main__": UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth", type=str, help="URL of the original Swin2SR checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.") UpperCAmelCase__ : Optional[Any] = parser.parse_args() convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple: super().__init__(*lowercase_ , **lowercase_) __snake_case = eval_examples __snake_case = post_process_function def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]: __snake_case = gen_kwargs.copy() __snake_case = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) __snake_case = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) __snake_case = gen_kwargs __snake_case = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case = self.get_eval_dataloader(lowercase_) __snake_case = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_) __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) else: __snake_case = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowercase_) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) __snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_) return metrics def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]: __snake_case = gen_kwargs.copy() __snake_case = self.get_test_dataloader(lowercase_) # Temporarily disable metric computation, we will do it in the loop here. 
__snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict') __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
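# Hedged usage sketch (trainer construction omitted; dataset names are illustrative):
# the evaluate/predict overrides above thread generation kwargs (max_length,
# num_beams) through to the underlying Seq2SeqTrainer evaluation loop.
metrics = trainer.evaluate(max_length=64, num_beams=4, metric_key_prefix="eval")
predictions = trainer.predict(test_dataset, test_examples, max_length=64, num_beams=4)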
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) UpperCAmelCase__ : int = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Tuple = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys UpperCAmelCase__ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , lowercase_ = 1_2_8 , lowercase_ = 2_5_6 , lowercase_ = 2000.0 , lowercase_ = 7_6_8 , lowercase_ = 1_2 , lowercase_ = 1_2 , lowercase_ = 6_4 , lowercase_ = 2_0_4_8 , lowercase_ = 0.1 , ) -> Union[str, Any]: super().__init__() __snake_case = nn.Sequential( nn.Linear(lowercase_ , d_model * 4 , bias=lowercase_) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowercase_) , nn.SiLU() , ) __snake_case = nn.Embedding(lowercase_ , lowercase_) __snake_case = False __snake_case = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_) __snake_case = nn.Dropout(p=lowercase_) __snake_case = nn.ModuleList() for lyr_num in range(lowercase_): # FiLM conditional T5 decoder __snake_case = DecoderLayer(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_) self.decoders.append(lowercase_) __snake_case = TaLayerNorm(lowercase_) __snake_case = nn.Dropout(p=lowercase_) __snake_case = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_) def _a ( self , lowercase_ , lowercase_) -> Tuple: __snake_case = torch.mul(query_input.unsqueeze(-1) , key_input.unsqueeze(-2)) return mask.unsqueeze(-3) def _a ( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]: __snake_case , __snake_case , __snake_case = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. __snake_case = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype) __snake_case = self.conditioning_emb(lowercase_).unsqueeze(1) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) __snake_case = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. __snake_case = torch.broadcast_to( torch.arange(lowercase_ , device=decoder_input_tokens.device) , (batch, seq_length) , ) __snake_case = self.position_encoding(lowercase_) __snake_case = self.continuous_inputs_projection(lowercase_) inputs += position_encodings __snake_case = self.dropout(lowercase_) # decoder: No padding present. __snake_case = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype) # Translate encoding masks to encoder-decoder masks. 
__snake_case = [(x, self.encoder_decoder_mask(lowercase_ , lowercase_)) for x, y in encodings_and_masks] # cross attend style: concat encodings __snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1) __snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1) for lyr in self.decoders: __snake_case = lyr( lowercase_ , conditioning_emb=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , )[0] __snake_case = self.decoder_norm(lowercase_) __snake_case = self.post_dropout(lowercase_) __snake_case = self.spec_out(lowercase_) return spec_out class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=1e-6) -> Optional[Any]: super().__init__() __snake_case = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_)) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=lowercase_ , d_kv=lowercase_ , num_heads=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_ , )) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_ , layer_norm_epsilon=lowercase_)) def _a ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , ) -> List[str]: __snake_case = self.layer[0]( lowercase_ , conditioning_emb=lowercase_ , attention_mask=lowercase_ , ) if encoder_hidden_states is not None: __snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1e10).to( encoder_hidden_states.dtype) __snake_case = self.layer[1]( lowercase_ , key_value_states=lowercase_ , attention_mask=lowercase_ , ) # Apply Film Conditional Feed Forward layer __snake_case = self.layer[-1](lowercase_ , lowercase_) return (hidden_states,) class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]: super().__init__() __snake_case = TaLayerNorm(lowercase_) __snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_) __snake_case = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_) __snake_case = nn.Dropout(lowercase_) def _a ( self , lowercase_ , lowercase_=None , lowercase_=None , ) -> Optional[Any]: # pre_self_attention_layer_norm __snake_case = self.layer_norm(lowercase_) if conditioning_emb is not None: __snake_case = self.FiLMLayer(lowercase_ , lowercase_) # Self-attention block __snake_case = self.attention(lowercase_) __snake_case = hidden_states + self.dropout(lowercase_) return hidden_states class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> List[Any]: super().__init__() __snake_case = Attention(query_dim=lowercase_ , heads=lowercase_ , dim_head=lowercase_ , out_bias=lowercase_ , scale_qk=lowercase_) __snake_case = TaLayerNorm(lowercase_ , eps=lowercase_) __snake_case = nn.Dropout(lowercase_) def _a ( self , lowercase_ , lowercase_=None , lowercase_=None , ) -> List[Any]: __snake_case = self.layer_norm(lowercase_) __snake_case = self.attention( lowercase_ , encoder_hidden_states=lowercase_ , attention_mask=attention_mask.squeeze(1) , ) __snake_case = hidden_states + self.dropout(lowercase_) return layer_output class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , 
lowercase_) -> Optional[int]: super().__init__() __snake_case = TaDenseGatedActDense(d_model=lowercase_ , d_ff=lowercase_ , dropout_rate=lowercase_) __snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowercase_) __snake_case = TaLayerNorm(lowercase_ , eps=lowercase_) __snake_case = nn.Dropout(lowercase_) def _a ( self , lowercase_ , lowercase_=None) -> Any: __snake_case = self.layer_norm(lowercase_) if conditioning_emb is not None: __snake_case = self.film(lowercase_ , lowercase_) __snake_case = self.DenseReluDense(lowercase_) __snake_case = hidden_states + self.dropout(lowercase_) return hidden_states class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_ , lowercase_) -> Union[str, Any]: super().__init__() __snake_case = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_) __snake_case = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_) __snake_case = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_) __snake_case = nn.Dropout(lowercase_) __snake_case = NewGELUActivation() def _a ( self , lowercase_) -> int: __snake_case = self.act(self.wi_a(lowercase_)) __snake_case = self.wi_a(lowercase_) __snake_case = hidden_gelu * hidden_linear __snake_case = self.dropout(lowercase_) __snake_case = self.wo(lowercase_) return hidden_states class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_=1e-6) -> Optional[Any]: super().__init__() __snake_case = nn.Parameter(torch.ones(lowercase_)) __snake_case = eps def _a ( self , lowercase_) -> Optional[Any]: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 __snake_case = hidden_states.to(torch.floataa).pow(2).mean(-1 , keepdim=lowercase_) __snake_case = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: __snake_case = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class __lowercase ( nn.Module ): def _a ( self , lowercase_) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.04_4715 * torch.pow(lowercase_ , 3.0)))) class __lowercase ( nn.Module ): def __init__( self , lowercase_ , lowercase_) -> int: super().__init__() __snake_case = nn.Linear(lowercase_ , out_features * 2 , bias=lowercase_) def _a ( self , lowercase_ , lowercase_) -> Optional[int]: __snake_case = self.scale_bias(lowercase_) __snake_case , __snake_case = torch.chunk(lowercase_ , 2 , -1) __snake_case = x * (1 + scale) + shift return x
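# Hedged numeric sketch of the FiLM modulation implemented by TaFiLMLayer above:
# a linear layer maps the conditioning embedding to (scale, shift), and features are
# transformed as x * (1 + scale) + shift. Toy shapes, not the model's real dimensions.
import torch
from torch import nn

d_cond, d_model = 8, 4
film = nn.Linear(d_cond, d_model * 2, bias=False)  # produces scale and shift jointly
x = torch.randn(2, 3, d_model)                     # (batch, seq, features)
cond = torch.randn(2, 1, d_cond)                   # one conditioning vector per sample
scale, shift = torch.chunk(film(cond), 2, dim=-1)
y = x * (1 + scale) + shift                        # broadcast over the sequence axis
print(y.shape)  # torch.Size([2, 3, 4])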
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In-order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    """A tree is full when every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
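# Hedged follow-up sketch using the helpers above: a node with exactly one child
# breaks the "full" property, while depth still counts the lopsided branch.
lopsided = Node(1)
lopsided.left = Node(2)  # right child deliberately missing
print(is_full_binary_tree(lopsided))  # False
print(depth_of_tree(lopsided))  # 2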
def min_path_sum(grid: list) -> int:
    """
    Find the minimum-cost path from the top-left to the bottom-right corner of a grid,
    moving only right or down, by accumulating path costs into the grid row by row.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """
    >>> fill_row([2, 2, 2], [1, 2, 3])
    [3, 4, 5]
    """
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
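# Hedged worked example of the row-by-row DP above: after min_path_sum runs, each cell
# holds the cheapest cost of reaching it from the top-left moving only right or down.
example = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
print(min_path_sum(example))  # 7, via the path 1 -> 3 -> 1 -> 1 -> 1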
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING UpperCAmelCase__ : str = logging.get_logger(__name__) UpperCAmelCase__ : int = { "microsoft/table-transformer-detection": ( "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json" ), } class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = '''table-transformer''' __UpperCAmelCase = ['''past_key_values'''] __UpperCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , lowercase_=True , lowercase_=None , lowercase_=3 , lowercase_=1_0_0 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=6 , lowercase_=2_0_4_8 , lowercase_=8 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=True , lowercase_="relu" , lowercase_=2_5_6 , lowercase_=0.1 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.02 , lowercase_=1.0 , lowercase_=False , lowercase_="sine" , lowercase_="resnet50" , lowercase_=True , lowercase_=False , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=1 , lowercase_=1 , lowercase_=5 , lowercase_=2 , lowercase_=0.1 , **lowercase_ , ) -> Optional[Any]: if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.') if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') __snake_case = CONFIG_MAPPING['resnet'](out_features=['stage4']) elif isinstance(lowercase_ , lowercase_): __snake_case = backbone_config.get('model_type') __snake_case = CONFIG_MAPPING[backbone_model_type] __snake_case = config_class.from_dict(lowercase_) # set timm attributes to None __snake_case , __snake_case , __snake_case = None, None, None __snake_case = use_timm_backbone __snake_case = backbone_config __snake_case = num_channels __snake_case = num_queries __snake_case = d_model __snake_case = encoder_ffn_dim __snake_case = encoder_layers __snake_case = encoder_attention_heads __snake_case = decoder_ffn_dim __snake_case = decoder_layers __snake_case = decoder_attention_heads __snake_case = dropout __snake_case = attention_dropout __snake_case = activation_dropout __snake_case = activation_function __snake_case = init_std __snake_case = init_xavier_std __snake_case = encoder_layerdrop __snake_case = decoder_layerdrop __snake_case = encoder_layers __snake_case = auxiliary_loss __snake_case = position_embedding_type __snake_case = backbone __snake_case = use_pretrained_backbone __snake_case = dilation # Hungarian matcher __snake_case = class_cost __snake_case = bbox_cost __snake_case = giou_cost # Loss coefficients __snake_case = mask_loss_coefficient __snake_case = dice_loss_coefficient __snake_case = bbox_loss_coefficient __snake_case = giou_loss_coefficient __snake_case = eos_coefficient super().__init__(is_encoder_decoder=lowercase_ , **lowercase_) @property def _a ( self) -> int: return self.encoder_attention_heads @property def _a ( self) -> int: return self.d_model class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = version.parse('''1.11''' ) @property def _a ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ('pixel_mask', {0: 'batch'}), ]) @property def _a ( self) -> float: return 1e-5 
@property def _a ( self) -> int: return 1_2
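# Hedged usage sketch against the published transformers API (assumption: the config
# above ships as TableTransformerConfig): the attribute_map makes the generic names
# resolve to the DETR-style ones.
from transformers import TableTransformerConfig

cfg = TableTransformerConfig()
print(cfg.hidden_size == cfg.d_model)  # True, aliased via attribute_map
print(cfg.num_attention_heads == cfg.encoder_attention_heads)  # True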
from ..utils import DummyObject, requires_backends class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[str]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> int: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[str]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Any: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) class __lowercase ( 
metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[str]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) def A ( *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ) -> List[Any]: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) def A ( *snake_case__ : int , **snake_case__ : Dict ) -> List[str]: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) def A ( *snake_case__ : Optional[int] , **snake_case__ : Tuple ) -> Dict: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) def A ( *snake_case__ : Tuple , **snake_case__ : Optional[int] ) -> List[str]: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) def A ( *snake_case__ : str , **snake_case__ : Optional[int] ) -> List[Any]: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) def A ( *snake_case__ : int , **snake_case__ : Union[str, Any] ) -> Dict: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) def A ( *snake_case__ : List[str] , **snake_case__ : Union[str, Any] ) -> List[Any]: '''simple docstring''' requires_backends(snake_case__ , ['torch'] ) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[str]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[str]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> int: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(self , ['torch']) @classmethod 
def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Any: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Tuple: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Any: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , 
**lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[str]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Dict: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Any: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: 
requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> int: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> str: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Tuple: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Tuple: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) @classmethod def _a ( cls , 
*lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Tuple: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Any: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Tuple: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> int: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> str: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Dict: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(cls , ['torch']) class __lowercase ( metaclass=lowerCamelCase__ ): __UpperCAmelCase = ['''torch'''] def __init__( self , *lowercase_ , **lowercase_) -> Union[str, Any]: requires_backends(self , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> List[str]: requires_backends(cls , ['torch']) @classmethod def _a ( cls , *lowercase_ , **lowercase_) -> Optional[int]: requires_backends(cls , ['torch'])
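# Hedged toy reimplementation of the pattern above (not the library's exact code):
# a metaclass guard that raises a helpful ImportError whenever a torch-backed symbol
# is touched without the torch backend installed.
class _RequiresTorchMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__}.{name} requires the 'torch' backend")


class FakeTorchModel(metaclass=_RequiresTorchMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the 'torch' backend")


try:
    FakeTorchModel()
except ImportError as err:
    print(err)  # FakeTorchModel requires the 'torch' backend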
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """
    Return number + 2 if (number, number + 2) form a twin-prime pair, otherwise -1.

    >>> twin_prime(5)
    7
    >>> twin_prime(4)
    -1
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
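# Hedged quick checks for twin_prime above (assumes maths.prime_check is importable):
# (5, 7) is a twin-prime pair; 4 is not prime; 7's partner 9 = 3 * 3 is composite.
print(twin_prime(5))  # 7
print(twin_prime(4))  # -1
print(twin_prime(7))  # -1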
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that n can be placed at (row, column) without clashing in the row,
    the column, or the 3x3 box containing the cell."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty (zero) cell, or None if full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
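# Hedged usage note for the solver above: sudoku() fills its argument in place and
# returns the same list object, so deep-copy the puzzle first when the original must
# be preserved (assumes a fresh, unsolved initial_grid).
import copy

puzzle = copy.deepcopy(initial_grid)
solution = sudoku(puzzle)
print(solution is puzzle)  # True when solvable: the grid was mutated, not copied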
from urllib.parse import quote

import pytest

from datasets.utils.hub import hf_hub_url


@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
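# Hedged concrete expansion of one parametrized case above: blanks in the filename
# must be percent-encoded, and a missing revision falls back to "main".
from urllib.parse import quote

repo_id, path, revision = "org-name/dataset-name", "filename with blanks.csv", None
expected = f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
print(expected)
# https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv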
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ : int = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : str = [ "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", "ViTMSNModel", "ViTMSNForImageClassification", "ViTMSNPreTrainedModel", ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys UpperCAmelCase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params UpperCAmelCase__ : Optional[Any] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def A ( snake_case__ : List[Any] ) -> str: '''simple docstring''' for pegasus_name, hf_name in PATTERNS: __snake_case = k.replace(snake_case__ , snake_case__ ) return k def A ( snake_case__ : dict , snake_case__ : dict ) -> PegasusForConditionalGeneration: '''simple docstring''' __snake_case = DEFAULTS.copy() cfg_kwargs.update(snake_case__ ) __snake_case = PegasusConfig(**snake_case__ ) __snake_case = PegasusForConditionalGeneration(snake_case__ ) __snake_case = torch_model.model.state_dict() __snake_case = {} for k, v in tf_weights.items(): __snake_case = rename_state_dict_key(snake_case__ ) if new_k not in sd: raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if "dense" in k or "proj" in new_k: __snake_case = v.T __snake_case = torch.tensor(snake_case__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}" # make sure embedding.padding_idx is respected __snake_case = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] ) __snake_case = mapping['shared.weight'] __snake_case = mapping['shared.weight'] __snake_case = {k: torch.zeros_like(snake_case__ ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping} mapping.update(**snake_case__ ) __snake_case , __snake_case = torch_model.model.load_state_dict(snake_case__ , strict=snake_case__ ) __snake_case = [ k for k in missing if k not in ['encoder.embed_positions.weight', 'decoder.embed_positions.weight'] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def A ( snake_case__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict: '''simple docstring''' __snake_case = tf.train.list_variables(snake_case__ ) __snake_case = {} __snake_case = ['Adafactor', 'global_step'] for name, shape in tqdm(snake_case__ , desc='converting tf checkpoint to dict' ): __snake_case = any(pat in name for pat in ignore_name ) if skip_key: continue __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) __snake_case = array return tf_weights def A ( snake_case__ : str , snake_case__ : str ) -> Tuple: '''simple docstring''' # save tokenizer first __snake_case = Path(snake_case__ ).parent.name __snake_case = task_specific_params[f"summarization_{dataset}"]['max_position_embeddings'] __snake_case = PegasusTokenizer.from_pretrained('sshleifer/pegasus' , model_max_length=snake_case__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(snake_case__ ) # convert model 
__snake_case = get_tf_weights_as_numpy(snake_case__ ) __snake_case = task_specific_params[f"summarization_{dataset}"] if dataset == "large": __snake_case = task_specific_params __snake_case = convert_pegasus(snake_case__ , snake_case__ ) torch_model.save_pretrained(snake_case__ ) __snake_case = torch_model.state_dict() sd.pop('model.decoder.embed_positions.weight' ) sd.pop('model.encoder.embed_positions.weight' ) torch.save(snake_case__ , Path(snake_case__ ) / 'pytorch_model.bin' ) if __name__ == "__main__": UpperCAmelCase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.") UpperCAmelCase__ : int = parser.parse_args() if args.save_dir is None: UpperCAmelCase__ : List[str] = Path(args.tf_ckpt_path).parent.name UpperCAmelCase__ : str = os.path.join("pegasus", dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
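# Hedged mini-demo of the PATTERNS rewrite used above: each [old, new] pair is applied
# in order with plain str.replace, turning a TF variable name into a PyTorch key.
# demo_patterns is a hand-picked subset of the full PATTERNS list, for illustration only.
demo_patterns = [["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], ["kernel", "weight"]]
key = "decoder/memory_attention/output_proj/kernel"
for old, new in demo_patterns:
    key = key.replace(old, new)
print(key)  # decoder.encoder_attn.output_proj.weight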
from sklearn.metrics import fa_score import datasets UpperCAmelCase__ : Optional[int] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" UpperCAmelCase__ : int = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" UpperCAmelCase__ : Optional[Any] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowercase ( datasets.Metric ): def _a ( self) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32')), 'references': datasets.Sequence(datasets.Value('int32')), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32'), 'references': datasets.Value('int32'), }) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , ) def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_=1 , lowercase_="binary" , lowercase_=None) -> Optional[int]: __snake_case = fa_score( lowercase_ , lowercase_ , labels=lowercase_ , pos_label=lowercase_ , average=lowercase_ , sample_weight=lowercase_) return {"f1": float(lowercase_) if score.size == 1 else score}
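# Hedged sanity check of the harmonic-mean formula in the metric description above,
# computed by hand against sklearn for the binary example from the docstring.
from sklearn.metrics import f1_score

refs, preds = [0, 1, 0, 1, 0], [0, 0, 1, 1, 0]
tp, fp, fn = 1, 1, 1  # one true positive, one false positive, one false negative
precision, recall = tp / (tp + fp), tp / (tp + fn)
manual = 2 * precision * recall / (precision + recall)
print(manual, f1_score(refs, preds))  # 0.5 0.5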
import inspect from typing import Callable, List, Optional, Union import torch from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, WhisperForConditionalGeneration, WhisperProcessor, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.utils import logging UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]: super().__init__() if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , ) def _a ( self , lowercase_ = "auto") -> Union[str, Any]: if slice_size == "auto": __snake_case = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_) def _a ( self) -> Any: self.enable_attention_slicing(lowercase_) @torch.no_grad() def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]: __snake_case = self.speech_processor.feature_extractor( lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device) __snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0) __snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[ 0 ] if isinstance(lowercase_ , lowercase_): __snake_case = 1 elif isinstance(lowercase_ , lowercase_): __snake_case = len(lowercase_) else: raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0) ): raise ValueError( F"`callback_steps` has to be a positive integer but is {callback_steps} of type" F" {type(lowercase_)}.") # get prompt text embeddings __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) __snake_case = text_inputs.input_ids if text_input_ids.shape[-1] > 
self.tokenizer.model_max_length: __snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' F" {self.tokenizer.model_max_length} tokens: {removed_text}") __snake_case = text_input_ids[:, : self.tokenizer.model_max_length] __snake_case = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case = text_embeddings.shape __snake_case = text_embeddings.repeat(1 , lowercase_ , 1) __snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __snake_case = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case = 42 if negative_prompt is None: __snake_case = [''] * batch_size elif type(lowercase_) is not type(lowercase_): raise TypeError( F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !=" F" {type(lowercase_)}.") elif isinstance(lowercase_ , lowercase_): __snake_case = [negative_prompt] elif batch_size != len(lowercase_): raise ValueError( F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:" F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.') else: __snake_case = negative_prompt __snake_case = text_input_ids.shape[-1] __snake_case = self.tokenizer( lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , ) __snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case = uncond_embeddings.shape[1] __snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1) __snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to( self.device) else: __snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_) else: if latents.shape != latents_shape: raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}") __snake_case = latents.to(self.device) # set timesteps self.scheduler.set_timesteps(lowercase_) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler __snake_case = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) __snake_case = {} if accepts_eta: __snake_case = eta for i, t in enumerate(self.progress_bar(lowercase_)): # expand the latents if we are doing classifier free guidance __snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents __snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_) # predict the noise residual __snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case = noise_pred.chunk(2) __snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ , lowercase_) __snake_case = 1 / 0.1_8215 * latents __snake_case = self.vae.decode(lowercase_).sample __snake_case = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": __snake_case = self.numpy_to_pil(lowercase_) if not return_dict: return image return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
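# Hedged numeric sketch of the classifier-free guidance step performed in the loop
# above: noise_pred = uncond + guidance_scale * (text - uncond), i.e. extrapolating
# from the unconditional prediction toward the text-conditioned one. Toy tensors only.
import torch

noise_pred_uncond = torch.zeros(1, 4)
noise_pred_text = torch.ones(1, 4)
guidance_scale = 7.5
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided)  # tensor([[7.5000, 7.5000, 7.5000, 7.5000]])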
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase__ : int = logging.get_logger(__name__) UpperCAmelCase__ : List[str] = {"vocab_file": "spiece.model"} UpperCAmelCase__ : Any = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } UpperCAmelCase__ : Optional[Any] = {"bert_for_seq_generation": 5_12} class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = [] __UpperCAmelCase = ['''input_ids''', '''attention_mask'''] def __init__( self , lowercase_ , lowercase_="<s>" , lowercase_="</s>" , lowercase_="<unk>" , lowercase_="<pad>" , lowercase_="<::::>" , lowercase_ = None , **lowercase_ , ) -> None: __snake_case = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , sep_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , ) __snake_case = vocab_file __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(lowercase_) @property def _a ( self) -> Dict: return self.sp_model.get_piece_size() def _a ( self) -> int: __snake_case = {self.convert_ids_to_tokens(lowercase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> List[Any]: __snake_case = self.__dict__.copy() __snake_case = None return state def __setstate__( self , lowercase_) -> Tuple: __snake_case = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): __snake_case = {} __snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _a ( self , lowercase_) -> List[str]: return self.sp_model.encode(lowercase_ , out_type=lowercase_) def _a ( self , lowercase_) -> Any: return self.sp_model.piece_to_id(lowercase_) def _a ( self , lowercase_) -> Optional[Any]: __snake_case = self.sp_model.IdToPiece(lowercase_) return token def _a ( self , lowercase_) -> str: __snake_case = [] __snake_case = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase_) + token __snake_case = [] else: current_sub_tokens.append(lowercase_) out_string += self.sp_model.decode(lowercase_) return out_string.strip() def _a ( self , lowercase_ , lowercase_ = None) -> Tuple[str]: if not os.path.isdir(lowercase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __snake_case = os.path.join( lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowercase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , lowercase_) elif not os.path.isfile(self.vocab_file): with open(lowercase_ , 'wb') as fi: __snake_case = self.sp_model.serialized_model_proto() fi.write(lowercase_) return (out_vocab_file,)
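# Hedged pure-Python sketch of the convert_tokens_to_string logic above: special tokens
# bypass the SentencePiece decode and are stitched in verbatim. A stub stands in for
# the real sp_model.
def stub_decode(pieces):  # pretend SentencePiece join: drop the \u2581 word markers
    return "".join(pieces).replace("\u2581", " ")


tokens = ["\u2581Hello", "</s>", "\u2581world"]
special, out, current = {"</s>"}, "", []
for token in tokens:
    if token in special:
        out += stub_decode(current) + token
        current = []
    else:
        current.append(token)
out += stub_decode(current)
print(out.strip())  # Hello</s> world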
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class __lowercase ( lowerCamelCase__ ): def __init__( self , *lowercase_ , lowercase_=None , lowercase_=None , **lowercase_) -> Tuple: super().__init__(*lowercase_ , **lowercase_) __snake_case = eval_examples __snake_case = post_process_function def _a ( self , lowercase_ = None , lowercase_=None , lowercase_ = None , lowercase_ = "eval" , **lowercase_ , ) -> Dict[str, float]: __snake_case = gen_kwargs.copy() __snake_case = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length') is not None else self.args.generation_max_length ) __snake_case = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams') is not None else self.args.generation_num_beams ) __snake_case = gen_kwargs __snake_case = self.eval_dataset if eval_dataset is None else eval_dataset __snake_case = self.get_eval_dataloader(lowercase_) __snake_case = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_) __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) else: __snake_case = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(lowercase_) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) __snake_case = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase_) return metrics def _a ( self , lowercase_ , lowercase_ , lowercase_=None , lowercase_ = "test" , **lowercase_) -> Union[str, Any]: __snake_case = gen_kwargs.copy() __snake_case = self.get_test_dataloader(lowercase_) # Temporarily disable metric computation, we will do it in the loop here. 
__snake_case = self.compute_metrics __snake_case = None __snake_case = time.time() __snake_case = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __snake_case = eval_loop( lowercase_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase_ , metric_key_prefix=lowercase_ , ) finally: __snake_case = compute_metrics __snake_case = self.args.eval_batch_size * self.args.world_size if F"{metric_key_prefix}_jit_compilation_time" in output.metrics: start_time += output.metrics[F"{metric_key_prefix}_jit_compilation_time"] output.metrics.update( speed_metrics( lowercase_ , lowercase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size) , )) if self.post_process_function is None or self.compute_metrics is None: return output __snake_case = self.post_process_function(lowercase_ , lowercase_ , lowercase_ , 'predict') __snake_case = self.compute_metrics(lowercase_) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys()): if not key.startswith(F"{metric_key_prefix}_"): __snake_case = metrics.pop(lowercase_) metrics.update(output.metrics) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase_)
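A sketch of how the evaluation and prediction overrides above (both renamed `_a` by the obfuscation; `evaluate`/`predict` in the unobfuscated source) are typically driven; the datasets and generation arguments are assumptions:

# Hypothetical driver: `trainer` is an instance of the subclass above, built with
# `eval_examples` and a `post_process_function`.
metrics = trainer.evaluate(max_length=128, num_beams=4, metric_key_prefix="eval")
output = trainer.predict(test_dataset, test_examples, metric_key_prefix="test",
                         max_length=128, num_beams=4)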
from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging UpperCAmelCase__ : int = logging.get_logger(__name__) # pylint: disable=invalid-name class __lowercase ( lowerCamelCase__ ): def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> int: super().__init__() if hasattr(scheduler.config , 'steps_offset') and scheduler.config.steps_offset != 1: __snake_case = ( F"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" F" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " 'to update the config accordingly as leaving `steps_offset` might lead to incorrect results' ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,' ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`' ' file' ) deprecate('steps_offset!=1' , '1.0.0' , lowercase_ , standard_warn=lowercase_) __snake_case = dict(scheduler.config) __snake_case = 1 __snake_case = FrozenDict(lowercase_) if hasattr(scheduler.config , 'skip_prk_steps') and scheduler.config.skip_prk_steps is False: __snake_case = ( F"The configuration file of this scheduler: {scheduler} has not set the configuration" ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make' ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to' ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face' ' Hub, it would be very nice if you could open a Pull request for the' ' `scheduler/scheduler_config.json` file' ) deprecate('skip_prk_steps not set' , '1.0.0' , lowercase_ , standard_warn=lowercase_) __snake_case = dict(scheduler.config) __snake_case = True __snake_case = FrozenDict(lowercase_) if safety_checker is None: logger.warning( F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" ' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. 
For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .') self.register_modules( segmentation_model=lowercase_ , segmentation_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , safety_checker=lowercase_ , feature_extractor=lowercase_ , ) def _a ( self , lowercase_ = "auto") -> Dict: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __snake_case = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowercase_) def _a ( self) -> Union[str, Any]: self.enable_attention_slicing(lowercase_) def _a ( self) -> Union[str, Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`') __snake_case = torch.device('cuda') for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _a ( self) -> Union[str, Any]: if self.device != torch.device('meta') or not hasattr(self.unet , '_hf_hook'): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , '_hf_hook') and hasattr(module._hf_hook , 'execution_device') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() def __call__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]: __snake_case = self.segmentation_processor( text=[text] , images=[image] , padding='max_length' , return_tensors='pt').to(self.device) __snake_case = self.segmentation_model(**lowercase_) __snake_case = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy() __snake_case = self.numpy_to_pil(lowercase_)[0].resize(image.size) # Run inpainting pipeline with the generated mask __snake_case = StableDiffusionInpaintPipeline( vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , ) return inpainting_pipeline( prompt=lowercase_ , image=lowercase_ , mask_image=lowercase_ , height=lowercase_ , width=lowercase_ , num_inference_steps=lowercase_ , guidance_scale=lowercase_ , negative_prompt=lowercase_ , num_images_per_prompt=lowercase_ , eta=lowercase_ , generator=lowercase_ , latents=lowercase_ , output_type=lowercase_ , return_dict=lowercase_ , callback=lowercase_ , callback_steps=lowercase_ , )
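A hedged usage sketch for the text-guided inpainting pipeline above, which first segments the region named by `text` with CLIPSeg and then inpaints it; the model components and images are assumptions:

import PIL.Image

# Hypothetical: `pipe` is an instance of the pipeline class above, assembled from
# CLIPSeg segmentation components plus Stable Diffusion inpainting parts.
init_image = PIL.Image.open("dog.png").convert("RGB")   # assumed input image
result = pipe(
    prompt="a cat sitting on a bench",   # what to paint into the masked region
    image=init_image,
    text="the dog",                      # region to segment and replace
    num_inference_steps=50,
)
out_image = result.images[0]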
from __future__ import annotations UpperCAmelCase__ : Dict = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def A ( snake_case__ : list[list[int]] , snake_case__ : list[int] , snake_case__ : list[int] , snake_case__ : int , snake_case__ : list[list[int]] , ) -> tuple[list[list[int]], list[list[int]]]: '''simple docstring''' __snake_case = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the reference grid __snake_case = 1 __snake_case = [ [0 for col in range(len(grid[0] ) )] for row in range(len(snake_case__ ) ) ] # the action grid __snake_case = init[0] __snake_case = init[1] __snake_case = 0 __snake_case = g + heuristic[x][y] # cost from starting cell to destination cell __snake_case = [[f, g, x, y]] __snake_case = False # flag that is set when search is complete __snake_case = False # flag set if we can't find expand while not found and not resign: if len(snake_case__ ) == 0: raise ValueError('Algorithm is unable to find solution' ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() __snake_case = cell.pop() __snake_case = next_cell[2] __snake_case = next_cell[3] __snake_case = next_cell[1] if x == goal[0] and y == goal[1]: __snake_case = True else: for i in range(len(snake_case__ ) ): # to try out different valid actions __snake_case = x + DIRECTIONS[i][0] __snake_case = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(snake_case__ ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: __snake_case = g + cost __snake_case = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) __snake_case = 1 __snake_case = i __snake_case = [] __snake_case = goal[0] __snake_case = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: __snake_case = x - DIRECTIONS[action[x][y]][0] __snake_case = y - DIRECTIONS[action[x][y]][1] __snake_case = xa __snake_case = ya invpath.append([x, y] ) __snake_case = [] for i in range(len(snake_case__ ) ): path.append(invpath[len(snake_case__ ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCAmelCase__ : str = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCAmelCase__ : int = [0, 0] # all coordinates are given in format [y,x] UpperCAmelCase__ : int = [len(grid) - 1, len(grid[0]) - 1] UpperCAmelCase__ : Optional[Any] = 1 # the cost map which pushes the path closer to the goal UpperCAmelCase__ : int = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCAmelCase__ : Tuple = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCAmelCase__ : Optional[int] = 99 UpperCAmelCase__ , UpperCAmelCase__ : str = search(grid, init, goal, cost, heuristic) print("ACTION MAP") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
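A small helper (an addition, not part of the original) that renders the returned path as arrow moves, relying on the DIRECTIONS ordering defined above:

ARROWS = ["<", "v", ">", "^"]  # same order as DIRECTIONS: left, down, right, up

def render_moves(path: list[list[int]]) -> list[str]:
    # Map each step's coordinate delta back to its DIRECTIONS index.
    return [
        ARROWS[DIRECTIONS.index([xb - xa, yb - ya])]
        for (xa, ya), (xb, yb) in zip(path, path[1:])
    ]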
import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = IFImgaImgSuperResolutionPipeline __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} __UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) __UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''} def _a ( self) -> str: return self._get_superresolution_dummy_components() def _a ( self , lowercase_ , lowercase_=0) -> List[str]: if str(lowercase_).startswith('mps'): __snake_case = torch.manual_seed(lowercase_) else: __snake_case = torch.Generator(device=lowercase_).manual_seed(lowercase_) __snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase_)).to(lowercase_) __snake_case = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(lowercase_)).to(lowercase_) __snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _a ( self) -> int: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) def _a ( self) -> List[Any]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA') def _a ( self) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1) def _a ( self) -> List[Any]: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2) def _a ( self) -> List[Any]: self._test_save_load_local() def _a ( self) -> int: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow UpperCAmelCase__ : Any = logging.getLogger() @unittest.skip('''Temporarily disable the doc tests.''' ) @require_torch @require_tf @slow class __lowercase ( unittest.TestCase ): def _a ( self , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = True , ) -> Dict: __snake_case = [file for file in os.listdir(lowercase_) if os.path.isfile(os.path.join(lowercase_ , lowercase_))] if identifier is not None: __snake_case = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(lowercase_ , lowercase_): for n_ in n_identifier: __snake_case = [file for file in files if n_ not in file] else: __snake_case = [file for file in files if n_identifier not in file] __snake_case = ignore_files or [] ignore_files.append('__init__.py') __snake_case = [file for file in files if file not in ignore_files] for file in files: # Open all files print('Testing' , lowercase_) if only_modules: __snake_case = file.split('.')[0] try: __snake_case = getattr(lowercase_ , lowercase_) __snake_case = doctest.DocTestSuite(lowercase_) __snake_case = unittest.TextTestRunner().run(lowercase_) self.assertIs(len(result.failures) , 0) except AttributeError: logger.info(F"{module_identifier} is not a module.") else: __snake_case = doctest.testfile(str('..' / directory / file) , optionflags=doctest.ELLIPSIS) self.assertIs(result.failed , 0) def _a ( self) -> str: __snake_case = Path('src/transformers') __snake_case = 'modeling' __snake_case = [ 'modeling_ctrl.py', 'modeling_tf_ctrl.py', ] self.analyze_directory(lowercase_ , identifier=lowercase_ , ignore_files=lowercase_) def _a ( self) -> Optional[Any]: __snake_case = Path('src/transformers') __snake_case = 'tokenization' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> List[str]: __snake_case = Path('src/transformers') __snake_case = 'configuration' self.analyze_directory(lowercase_ , identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('src/transformers') __snake_case = ['configuration', 'modeling', 'tokenization'] self.analyze_directory(lowercase_ , n_identifier=lowercase_) def _a ( self) -> Dict: __snake_case = Path('docs/source') __snake_case = ['favicon.ico'] self.analyze_directory(lowercase_ , ignore_files=lowercase_ , only_modules=lowercase_)
from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
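A brief import sketch showing what the conditional exports above resolve to when torch and transformers are installed; the checkpoint ids are assumptions:

from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")  # assumed id
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed id
    controlnet=controlnet,
)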
def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' def count_of_possible_combinations(snake_case__ : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case__ ) def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' def count_of_possible_combinations_with_dp_array( snake_case__ : int , snake_case__ : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] __snake_case = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case__ ) for item in array ) __snake_case = answer return answer __snake_case = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case__ , snake_case__ ) def A ( snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ) -> int: '''simple docstring''' __snake_case = [0] * (target + 1) __snake_case = 1 for i in range(1 , target + 1 ): for j in range(snake_case__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase__ : str = 3 UpperCAmelCase__ : Optional[int] = 5 UpperCAmelCase__ : Tuple = [1, 2, 5] print(combination_sum_iv(n, array, target))
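A worked check of the iterative DP above, written against the original name `combination_sum_iv` preserved in the demo (the obfuscated defs themselves are not runnable as printed):

# dp_array[i] counts ordered combinations summing to i with items {1, 2, 5}:
# dp = [1, 1, 2, 3, 5, 9], so the demo prints 9.
assert combination_sum_iv(3, [1, 2, 5], 5) == 9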
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class __lowercase ( lowerCamelCase__ ): __UpperCAmelCase = 42 class __lowercase ( lowerCamelCase__ , lowerCamelCase__ ): @register_to_config def __init__( self , lowercase_ = 3 , lowercase_ = 3 , lowercase_ = ("DownEncoderBlock2D",) , lowercase_ = ("UpDecoderBlock2D",) , lowercase_ = (6_4,) , lowercase_ = 1 , lowercase_ = "silu" , lowercase_ = 3 , lowercase_ = 3_2 , lowercase_ = 2_5_6 , lowercase_ = 3_2 , lowercase_ = None , lowercase_ = 0.1_8215 , lowercase_ = "group" , ) -> Union[str, Any]: super().__init__() # pass init params to Encoder __snake_case = Encoder( in_channels=lowercase_ , out_channels=lowercase_ , down_block_types=lowercase_ , block_out_channels=lowercase_ , layers_per_block=lowercase_ , act_fn=lowercase_ , norm_num_groups=lowercase_ , double_z=lowercase_ , ) __snake_case = vq_embed_dim if vq_embed_dim is not None else latent_channels __snake_case = nn.Convad(lowercase_ , lowercase_ , 1) __snake_case = VectorQuantizer(lowercase_ , lowercase_ , beta=0.25 , remap=lowercase_ , sane_index_shape=lowercase_) __snake_case = nn.Convad(lowercase_ , lowercase_ , 1) # pass init params to Decoder __snake_case = Decoder( in_channels=lowercase_ , out_channels=lowercase_ , up_block_types=lowercase_ , block_out_channels=lowercase_ , layers_per_block=lowercase_ , act_fn=lowercase_ , norm_num_groups=lowercase_ , norm_type=lowercase_ , ) @apply_forward_hook def _a ( self , lowercase_ , lowercase_ = True) -> VQEncoderOutput: __snake_case = self.encoder(lowercase_) __snake_case = self.quant_conv(lowercase_) if not return_dict: return (h,) return VQEncoderOutput(latents=lowercase_) @apply_forward_hook def _a ( self , lowercase_ , lowercase_ = False , lowercase_ = True) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: __snake_case , __snake_case , __snake_case = self.quantize(lowercase_) else: __snake_case = h __snake_case = self.post_quant_conv(lowercase_) __snake_case = self.decoder(lowercase_ , quant if self.config.norm_type == 'spatial' else None) if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_) def _a ( self , lowercase_ , lowercase_ = True) -> Union[DecoderOutput, torch.FloatTensor]: __snake_case = sample __snake_case = self.encode(lowercase_).latents __snake_case = self.decode(lowercase_).sample if not return_dict: return (dec,) return DecoderOutput(sample=lowercase_)
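A minimal round-trip sketch for the VQ autoencoder above; the instantiation and input shape are assumptions:

import torch

# Hypothetical: `model` is an instance of the VQ model class above with its
# default config (3 input channels).
sample = torch.randn(1, 3, 32, 32)        # assumed resolution
latents = model.encode(sample).latents    # VQEncoderOutput.latents
recon = model.decode(latents).sample      # DecoderOutput.sample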
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss UpperCAmelCase__ : Union[str, Any] = pytest.mark.integration @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> List[str]: __snake_case = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase_) for x in np.arange(3_0).tolist()]}) return dset def _a ( self) -> Optional[int]: import faiss __snake_case = self._create_dummy_dataset() __snake_case = dset.map( lambda lowercase_ , lowercase_: {"vecs": i * np.ones(5 , dtype=np.floataa)} , with_indices=lowercase_ , keep_in_memory=lowercase_) __snake_case = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') dset.drop_index('vecs') def _a ( self) -> str: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> int: import faiss __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name) dset.load_faiss_index('vecs2' , tmp_file.name) os.unlink(tmp_file.name) __snake_case , __snake_case = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa)) self.assertEqual(examples['filename'][0] , 'my_name-train_29') def _a ( self) -> List[Any]: __snake_case = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5)) * np.arange(3_0).reshape(-1 , 1) , index_name='vecs') dset.drop_index('vecs') self.assertRaises(lowercase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa))) def _a ( self) -> Any: from elasticsearch import Elasticsearch __snake_case = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 3_0) __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 2_9}]}} __snake_case = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=lowercase_) __snake_case , __snake_case = dset.get_nearest_examples('filename' , 'my_name-train_29') self.assertEqual(examples['filename'][0] , 'my_name-train_29') @require_faiss class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[int]: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsNotNone(index.faiss_index) self.assertEqual(index.faiss_index.ntotal , 5) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa)) self.assertEqual(index.faiss_index.ntotal , 1_0) # single query __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertRaises(lowercase_ , index.search , query.reshape(-1 , 1)) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) # batched queries __snake_case = np.eye(5 , dtype=np.floataa)[::-1] __snake_case , __snake_case = index.search_batch(lowercase_) self.assertRaises(lowercase_ , index.search_batch , queries[0]) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([4, 3, 2, 1, 0] , lowercase_) def _a ( self) -> str: import faiss __snake_case = FaissIndex(string_factory='Flat') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) __snake_case = FaissIndex(string_factory='LSH') index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexLSH) with self.assertRaises(lowercase_): __snake_case = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5)) def _a ( self) -> Optional[int]: import faiss __snake_case = faiss.IndexFlat(5) __snake_case = FaissIndex(custom_index=lowercase_) index.add_vectors(np.eye(5 , dtype=np.floataa)) self.assertIsInstance(index.faiss_index , faiss.IndexFlat) def _a ( self) -> Tuple: import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT) index.add_vectors(np.eye(5 , dtype=np.floataa)) # Setting delete=False and unlinking manually is not pretty... 
but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase_) as tmp_file: index.save(tmp_file.name) __snake_case = FaissIndex.load(tmp_file.name) os.unlink(tmp_file.name) __snake_case = np.zeros(5 , dtype=np.floataa) __snake_case = 1 __snake_case , __snake_case = index.search(lowercase_) self.assertGreater(scores[0] , 0) self.assertEqual(indices[0] , 1) @require_faiss def A ( snake_case__ : List[str] ) -> List[Any]: '''simple docstring''' import faiss __snake_case = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) __snake_case = 'index.faiss' __snake_case = f"mock://{index_name}" index.save(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = FaissIndex.load(snake_case__ , storage_options=mockfs.storage_options ) __snake_case = np.zeros(5 , dtype=np.floataa ) __snake_case = 1 __snake_case , __snake_case = index.search(snake_case__ ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class __lowercase ( lowerCamelCase__ ): def _a ( self) -> Optional[Any]: from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch( 'elasticsearch.client.IndicesClient.create') as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk: __snake_case = Elasticsearch() __snake_case = {'acknowledged': True} __snake_case = ElasticSearchIndex(es_client=lowercase_) mocked_bulk.return_value([(True, None)] * 3) index.add_documents(['foo', 'bar', 'foobar']) # single query __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # single query with timeout __snake_case = 'foo' __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case = index.search(lowercase_ , request_timeout=3_0) self.assertEqual(scores[0] , 1) self.assertEqual(indices[0] , 0) # batched queries __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_) # batched queries with timeout __snake_case = ['foo', 'bar', 'foobar'] __snake_case = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case = index.search_batch(lowercase_ , request_timeout=3_0) __snake_case = [scores[0] for scores in total_scores] __snake_case = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase_) , 0) self.assertListEqual([1, 1, 1] , lowercase_)
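A hedged end-to-end sketch of the `datasets` FAISS workflow these tests exercise; the column name and vectors are assumptions:

import numpy as np
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"]})
ds = ds.map(lambda ex, i: {"vecs": np.ones(5, dtype=np.float32) * i}, with_indices=True)
ds.add_faiss_index("vecs")  # builds an in-memory FAISS index over the column
scores, examples = ds.get_nearest_examples("vecs", np.ones(5, dtype=np.float32), k=1)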
import re def A ( snake_case__ : str ) -> str: '''simple docstring''' if len(re.findall('[ATCG]' , snake_case__ ) ) != len(snake_case__ ): raise ValueError('Invalid Strand' ) return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) ) if __name__ == "__main__": import doctest doctest.testmod()
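Quick checks of the complement routine above (`A` is the obfuscated name; each base maps A<->T and C<->G):

assert A("ATCG") == "TAGC"
assert A("GTAT") == "CATA"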
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def A ( snake_case__ : Dataset , snake_case__ : Dict[str, str] ) -> Optional[Any]: '''simple docstring''' __snake_case = args.log_outputs __snake_case = '_'.join(args.dataset.split('/' ) + [args.config, args.split] ) # load metric __snake_case = load_metric('wer' ) __snake_case = load_metric('cer' ) # compute metrics __snake_case = wer.compute(references=result['target'] , predictions=result['prediction'] ) __snake_case = cer.compute(references=result['target'] , predictions=result['prediction'] ) # print & log results __snake_case = f"WER: {wer_result}\nCER: {cer_result}" print(snake_case__ ) with open(f"{dataset_id}_eval_results.txt" , 'w' ) as f: f.write(snake_case__ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __snake_case = f"log_{dataset_id}_predictions.txt" __snake_case = f"log_{dataset_id}_targets.txt" with open(snake_case__ , 'w' ) as p, open(snake_case__ , 'w' ) as t: # mapping function to write output def write_to_file(snake_case__ : Union[str, Any] , snake_case__ : Tuple ): p.write(f"{i}" + '\n' ) p.write(batch['prediction'] + '\n' ) t.write(f"{i}" + '\n' ) t.write(batch['target'] + '\n' ) result.map(snake_case__ , with_indices=snake_case__ ) def A ( snake_case__ : str ) -> str: '''simple docstring''' __snake_case = '[,?.!\-\;\:"“%‘”�—’…–]' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __snake_case = re.sub(snake_case__ , '' , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! __snake_case = ['\n\n', '\n', ' ', ' '] for t in token_sequences_to_ignore: __snake_case = ' '.join(text.split(snake_case__ ) ) return text def A ( snake_case__ : int ) -> Optional[int]: '''simple docstring''' # load dataset __snake_case = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case__ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __snake_case = AutoFeatureExtractor.from_pretrained(args.model_id ) __snake_case = feature_extractor.sampling_rate # resample audio __snake_case = dataset.cast_column('audio' , Audio(sampling_rate=snake_case__ ) ) # load eval pipeline if args.device is None: __snake_case = 0 if torch.cuda.is_available() else -1 __snake_case = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case__ : Optional[Any] ): __snake_case = asr( batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __snake_case = prediction['text'] __snake_case = normalize_text(batch['sentence'] ) return batch # run inference on all examples __snake_case = dataset.map(snake_case__ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case__ , snake_case__ ) if __name__ == "__main__": UpperCAmelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers" ) parser.add_argument( "--dataset", type=str, required=True, help="Dataset name to evaluate the `model_id`. 
Should be loadable with 🤗 Datasets", ) parser.add_argument( "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice" ) parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`") parser.add_argument( "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds." ) parser.add_argument( "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second." ) parser.add_argument( "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis." ) parser.add_argument( "--device", type=int, default=None, help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.", ) UpperCAmelCase__ : str = parser.parse_args() main(args)
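A hedged invocation sketch for the evaluation script above; the model and dataset ids are assumptions:

# Example command line (assumed ids):
#   python eval.py \
#       --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs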
def A ( snake_case__ : list ) -> list: '''simple docstring''' if len(snake_case__ ) <= 1: return [tuple(snake_case__ )] __snake_case = [] def generate(snake_case__ : int , snake_case__ : list ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , snake_case__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even __snake_case , __snake_case = arr[k - 1], arr[i] else: # k is odd __snake_case , __snake_case = arr[k - 1], arr[0] generate(k - 1 , snake_case__ ) generate(len(snake_case__ ) , snake_case__ ) return res if __name__ == "__main__": UpperCAmelCase__ : Optional[Any] = input("Enter numbers separated by a comma:\n").strip() UpperCAmelCase__ : List[str] = [int(item) for item in user_input.split(",")] print(heaps(arr))
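Quick checks of the intended behavior of Heap's algorithm above, written against the name `heaps` used in the demo (the obfuscated defs as printed are not directly runnable):

assert heaps([1, 2]) == [(1, 2), (2, 1)]
assert len(heaps([1, 2, 3])) == 6  # 3! permutations, each returned as a tuple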
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def A ( *snake_case__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' with open(snake_case__ , 'r' ) as fh: fcntl.flock(snake_case__ , fcntl.LOCK_EX ) try: print(*snake_case__ ) finally: fcntl.flock(snake_case__ , fcntl.LOCK_UN ) UpperCAmelCase__ : Any = int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) UpperCAmelCase__ : Any = torch.device("cuda", local_rank) UpperCAmelCase__ : Union[str, Any] = socket.gethostname() UpperCAmelCase__ : int = F"""[{hostname}-{local_rank}]""" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank UpperCAmelCase__ : Optional[int] = dist.get_rank() UpperCAmelCase__ : List[str] = dist.get_world_size() printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""") dist.barrier() if rank == 0: printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""") except Exception: printflock(F"""{gpu} is broken""") raise
def A ( snake_case__ : list[list[float]] ) -> list[list[float]]: '''simple docstring''' __snake_case = [] for data in source_data: for i, el in enumerate(snake_case__ ): if len(snake_case__ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(snake_case__ ) ) return data_lists def A ( snake_case__ : list[list[float]] , snake_case__ : list[int] ) -> list[list[float]]: '''simple docstring''' __snake_case = [] for dlist, weight in zip(snake_case__ , snake_case__ ): __snake_case = min(snake_case__ ) __snake_case = max(snake_case__ ) __snake_case = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: __snake_case = f"Invalid weight of {weight:f} provided" raise ValueError(snake_case__ ) score_lists.append(snake_case__ ) return score_lists def A ( snake_case__ : list[list[float]] ) -> list[float]: '''simple docstring''' __snake_case = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(snake_case__ ): __snake_case = final_scores[j] + ele return final_scores def A ( snake_case__ : list[list[float]] , snake_case__ : list[int] ) -> list[list[float]]: '''simple docstring''' __snake_case = get_data(snake_case__ ) __snake_case = calculate_each_score(snake_case__ , snake_case__ ) __snake_case = generate_final_scores(snake_case__ ) # append scores to source data for i, ele in enumerate(snake_case__ ): source_data[i].append(snake_case__ ) return source_data
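A usage sketch of the intended scoring pipeline above (the final `A`, whose duplicated obfuscated parameter names make the printed defs non-runnable); the data and weights are assumptions. Per the code, weight 0 rewards lower values and weight 1 rewards higher ones:

vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]  # assumed: price, mileage, year
weights = [0, 0, 1]            # minimize price and mileage, maximize year
scored = A(vehicles, weights)  # appends an aggregate score to each row in place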
from datetime import datetime import requests def A ( snake_case__ : str ) -> bytes: '''simple docstring''' __snake_case = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url=' __snake_case = requests.get(base_url + url ).json()[0]['urls'][0]['src'] return requests.get(snake_case__ ).content if __name__ == "__main__": UpperCAmelCase__ : Dict = input("Enter Video/IGTV url: ").strip() UpperCAmelCase__ : Optional[Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, "wb") as fp: fp.write(download_video(url)) print(F"""Done. Video saved to disk as {file_name}.""")
import json import sys def A ( snake_case__ : List[Any] , snake_case__ : str ) -> Union[str, Any]: '''simple docstring''' with open(snake_case__ , encoding='utf-8' ) as f: __snake_case = json.load(snake_case__ ) __snake_case = ['<details>', '<summary>Show updated benchmarks!</summary>', ' '] for benchmark_name in sorted(snake_case__ ): __snake_case = results[benchmark_name] __snake_case = benchmark_name.split('/' )[-1] output_md.append(f"### Benchmark: {benchmark_file_name}" ) __snake_case = '| metric |' __snake_case = '|--------|' __snake_case = '| new / old (diff) |' for metric_name in sorted(snake_case__ ): __snake_case = benchmark_res[metric_name] __snake_case = metric_vals['new'] __snake_case = metric_vals.get('old' , snake_case__ ) __snake_case = metric_vals.get('diff' , snake_case__ ) __snake_case = f" {new_val:f}" if isinstance(snake_case__ , (int, float) ) else 'None' if old_val is not None: val_str += f" / {old_val:f}" if isinstance(snake_case__ , (int, float) ) else "None" if dif_val is not None: val_str += f" ({dif_val:f})" if isinstance(snake_case__ , (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('</details>' ) with open(snake_case__ , 'w' , encoding='utf-8' ) as f: f.writelines('\n'.join(snake_case__ ) ) if __name__ == "__main__": UpperCAmelCase__ : Optional[Any] = sys.argv[1] UpperCAmelCase__ : Any = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
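A hedged sketch of the input JSON shape the converter above expects, with keys inferred from the accesses in the code:

# Inferred structure: {benchmark_name: {metric_name: {"new": ..., "old": ..., "diff": ...}}}
example = {
    "benchmarks/bench_array.json": {
        "read_time_s": {"new": 1.2, "old": 1.5, "diff": -0.3},
    }
}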
import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class __lowercase : def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=9_9 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_6 , lowercase_=2 , lowercase_=0.02 , lowercase_=3 , lowercase_=4 , lowercase_=None , ) -> Optional[int]: __snake_case = parent __snake_case = batch_size __snake_case = seq_length __snake_case = is_training __snake_case = use_input_mask __snake_case = use_token_type_ids __snake_case = use_labels __snake_case = vocab_size __snake_case = hidden_size __snake_case = num_hidden_layers __snake_case = num_attention_heads __snake_case = intermediate_size __snake_case = hidden_act __snake_case = hidden_dropout_prob __snake_case = attention_probs_dropout_prob __snake_case = max_position_embeddings __snake_case = type_vocab_size __snake_case = type_sequence_label_size __snake_case = initializer_range __snake_case = num_labels __snake_case = num_choices __snake_case = scope def _a ( self) -> Union[str, Any]: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case = None if self.use_input_mask: __snake_case = random_attention_mask([self.batch_size, self.seq_length]) __snake_case = None if self.use_token_type_ids: __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) __snake_case = None __snake_case = None __snake_case = None if self.use_labels: __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __snake_case = ids_tensor([self.batch_size] , self.num_choices) __snake_case = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self) -> Tuple: return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , use_stable_embedding=lowercase_ , ) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_) -> Optional[Any]: __snake_case = OpenLlamaModel(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_) __snake_case = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , 
lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[Any]: __snake_case = True __snake_case = OpenLlamaModel(lowercase_) model.to(lowercase_) model.eval() __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , ) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , ) __snake_case = model(lowercase_ , attention_mask=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> str: __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _a ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Optional[int]: __snake_case = True __snake_case = True __snake_case = OpenLlamaForCausalLM(config=lowercase_) model.to(lowercase_) model.eval() # first forward pass __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , use_cache=lowercase_ , ) __snake_case = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size) __snake_case = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and __snake_case = torch.cat([input_ids, next_tokens] , dim=-1) __snake_case = torch.cat([input_mask, next_mask] , dim=-1) __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] __snake_case = model( lowercase_ , attention_mask=lowercase_ , encoder_hidden_states=lowercase_ , encoder_attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )['hidden_states'][0] # select random slice __snake_case = ids_tensor((1,) , output_from_past.shape[-1]).item() __snake_case = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3)) def _a ( self) -> Optional[Any]: __snake_case = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) = config_and_inputs __snake_case = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class __lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): __UpperCAmelCase = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) __UpperCAmelCase = (OpenLlamaForCausalLM,) if is_torch_available() else () __UpperCAmelCase = ( { '''feature-extraction''': OpenLlamaModel, '''text-classification''': OpenLlamaForSequenceClassification, '''text-generation''': OpenLlamaForCausalLM, '''zero-shot''': OpenLlamaForSequenceClassification, } if 
is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def _a ( self) -> Tuple: __snake_case = OpenLlamaModelTester(self) __snake_case = ConfigTester(self , config_class=lowercase_ , hidden_size=3_7) def _a ( self) -> int: self.config_tester.run_common_tests() def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> Optional[Any]: __snake_case = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case = type self.model_tester.create_and_check_model(*lowercase_) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> str: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'single_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) def _a ( self) -> int: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = 3 __snake_case = 'multi_label_classification' __snake_case = input_dict['input_ids'] __snake_case = input_ids.ne(1).to(lowercase_) __snake_case = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float) __snake_case = OpenLlamaForSequenceClassification(lowercase_) model.to(lowercase_) model.eval() __snake_case = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip('Open-Llama buffers include complex numbers, which breaks this test') def _a ( self) -> List[Any]: pass @parameterized.expand([('linear',), ('dynamic',)]) def _a ( self , lowercase_) -> Optional[Any]: __snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common() __snake_case = ids_tensor([1, 1_0] , config.vocab_size) __snake_case = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size) set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = OpenLlamaModel(lowercase_) original_model.to(lowercase_) original_model.eval() __snake_case = original_model(lowercase_).last_hidden_state __snake_case = original_model(lowercase_).last_hidden_state set_seed(4_2) # Fixed seed at init time so the two models get the same random weights __snake_case = {'type': scaling_type, 'factor': 10.0} __snake_case = OpenLlamaModel(lowercase_) scaled_model.to(lowercase_) scaled_model.eval() 
__snake_case = scaled_model(lowercase_).last_hidden_state __snake_case = scaled_model(lowercase_).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) else: self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1e-5))
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase__ : Dict = { "configuration_xmod": [ "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP", "XmodConfig", "XmodOnnxConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Dict = [ "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST", "XmodForCausalLM", "XmodForMaskedLM", "XmodForMultipleChoice", "XmodForQuestionAnswering", "XmodForSequenceClassification", "XmodForTokenClassification", "XmodModel", "XmodPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys UpperCAmelCase__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
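A brief sketch of what the lazy module above resolves to for a downstream import; the checkpoint id is an assumption:

# With torch installed, the lazy module exposes the real classes:
from transformers import XmodConfig, XmodModel

model = XmodModel.from_pretrained("facebook/xmod-base")  # assumed checkpoint id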
676
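The `_import_structure` / `_LazyModule` pattern above defers heavy submodule imports until a symbol is first accessed. A stripped-down sketch of the mechanism (an illustrative toy; the real `_LazyModule` in `transformers.utils` also handles submodules, type checking, and error reporting, and `LazyModule` here is a hypothetical name):

import importlib
import types


class LazyModule(types.ModuleType):
    # Resolve exported names to their defining submodule only on first attribute access.
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._symbol_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}

    def __getattr__(self, attr: str):
        module_name = self._symbol_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f".{module_name}", self.__name__), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value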
def is_automorphic_number(number: int) -> bool:
    """Return True if ``number`` is automorphic, i.e. its square ends in the number itself."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
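A few quick checks of the digit-by-digit loop above (the import path `automorphic_number` is a hypothetical module name for the file):

from automorphic_number import is_automorphic_number  # hypothetical module name for the file above

# n is automorphic when n*n ends in the digits of n: 76**2 = 5776, 376**2 = 141376, ...
for n in (0, 1, 5, 6, 25, 76, 376):
    assert is_automorphic_number(n)
assert not is_automorphic_number(7)  # 7**2 = 49 does not end in 7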
from copy import deepcopy
from typing import Optional, Union

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available


if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


class SamProcessor(ProcessorMixin):
    """Wraps a SAM image processor and rescales prompt points/boxes to the resized image."""

    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"

    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]

    def __call__(
        self,
        images=None,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding_image_processor = self.image_processor(
            images,
            return_tensors=return_tensors,
            **kwargs,
        )

        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]

        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()

        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
        )

        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor,
            original_sizes,
            input_points=input_points,
            input_labels=input_labels,
            input_boxes=input_boxes,
            return_tensors=return_tensors,
        )

        return encoding_image_processor

    def _normalize_and_convert(
        self,
        encoding_image_processor,
        original_sizes,
        input_points=None,
        input_labels=None,
        input_boxes=None,
        return_tensors="pt",
    ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)

            input_points = np.array(input_points)

        if input_labels is not None:
            input_labels = np.array(input_labels)

        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)

        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})

        return encoding_image_processor

    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad the 2D points and labels to the maximum number of points in the batch."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0
                )
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels

    def _normalize_coordinates(self, target_size, coords, original_size, is_bounding_box=False) -> np.ndarray:
        """Rescale coordinates from the original image size to the preprocessed size."""
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)

        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)

        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)

        if is_bounding_box:
            coords = coords.reshape(-1, 4)

        return coords

    def _check_and_preprocess_points(
        self,
        input_points=None,
        input_labels=None,
        input_boxes=None,
    ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()

            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None

        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()

            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None

        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()

            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None

        return input_points, input_labels, input_boxes

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))

    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
676
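A typical call into the processor above, for one image and one prompt point (a hedged sketch: the checkpoint id and the example point are illustrative, and shapes depend on the image processor's `longest_edge`):

from PIL import Image
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
image = Image.new("RGB", (1024, 768))  # stand-in image
# One image, one prompt point (x, y) given in original-image coordinates
inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
print(inputs["pixel_values"].shape)  # preprocessed pixels
print(inputs["input_points"].shape)  # prompt points rescaled to the resized image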
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid element-wise."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Apply the SiLU / swish activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
1
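A quick numerical illustration of the two activations defined above (standalone, so the math is repeated inline):

import numpy as np

# swish(x) = x * sigmoid(x): close to 0 for large negative x, close to x for large positive x
x = np.array([-6.0, 0.0, 6.0])
swish = x * (1 / (1 + np.exp(-x)))
print(swish)  # approximately [-0.0148, 0.0, 5.9852]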
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER and write the metrics (and optionally all outputs) to text files."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize the target transcription before scoring."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
676
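To see what the `normalize_text` step in the script above does to a transcription, here is the same regex and whitespace pass run standalone (a demo, not part of the evaluation pipeline):

import re

chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 -- same pattern as in the script above


def normalize_text(text: str) -> str:
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    for t in ["\n\n", "\n", "   ", "  "]:
        text = " ".join(text.split(t))
    return text


print(normalize_text("Hello, World!\nHow are you?"))  # -> "hello world how are you"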
def lucas_lehmer_test(p: int) -> bool:
    """Return True if the Mersenne number 2**p - 1 is prime (p must be at least 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
676
1
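The test above rests on the Lucas-Lehmer fact that 2**p - 1 is prime exactly when the residue s is 0 after p - 2 squarings of the recurrence s -> s*s - 2 (mod 2**p - 1). A standalone cross-check against the first few Mersenne exponents (the algorithm is repeated inline so the snippet runs on its own):

def lucas_lehmer(p: int) -> bool:
    # Same recurrence as above: s -> s*s - 2 (mod 2**p - 1)
    if p == 2:
        return True
    s, m = 4, (1 << p) - 1
    for _ in range(p - 2):
        s = (s * s - 2) % m
    return s == 0


# 2**11 - 1 = 2047 = 23 * 89 is composite; the other exponents here give Mersenne primes
assert [p for p in (2, 3, 5, 7, 11, 13) if lucas_lehmer(p)] == [2, 3, 5, 7, 13]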
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """Combines CLIP image/text embeddings into the conditioning format the unCLIP decoder expects."""

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
676
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Dynamic-programming check for whether some subset of ``arr`` sums to ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
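For intuition, a brute-force power-set sweep agrees with the DP table above on small inputs (a cross-check sketch; exponential in the array length, so only for tiny arrays):

def subset_sums(arr: list[int]) -> set[int]:
    # Every reachable subset sum, built incrementally one element at a time.
    sums = {0}
    for x in arr:
        sums |= {s + x for s in sums}
    return sums


arr = [3, 34, 4, 12, 5, 2]
assert 9 in subset_sums(arr)       # 4 + 5
assert 30 not in subset_sums(arr)  # no combination reaches 30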
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
676
1
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)

else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
676
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config


def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Split the fused qkv projection into separate query/key/value tensors
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict


def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
676
1
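Once a checkpoint has been converted and pushed by the script above, it loads like any other Transformers model. A hedged usage sketch (the hub id comes from the `url_to_name` mapping above; the exact output size can vary with the processor's padding):

import torch
from PIL import Image
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

processor = Swin2SRImageProcessor()
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-classical-sr-x2-64")

image = Image.new("RGB", (128, 128))  # stand-in low-resolution input
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.reconstruction.shape)  # roughly (1, 3, 256, 256) for the x2 model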
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    """Return number + 2 if number and number + 2 form a twin-prime pair, else -1."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
676
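A standalone check of the twin-prime logic, with a minimal `is_prime` stand-in for `maths.prime_check.is_prime` (which may not be on the import path):

def is_prime(n: int) -> bool:
    # Trial division; an adequate stand-in for maths.prime_check.is_prime in this demo
    return n >= 2 and all(n % d for d in range(2, int(n**0.5) + 1))


# (3, 5), (5, 7) and (11, 13) are twin-prime pairs; 23 + 2 = 25 is composite
for n, expected in [(3, 5), (5, 7), (11, 13), (23, -1)]:
    partner = n + 2 if is_prime(n) and is_prime(n + 2) else -1
    assert partner == expected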
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_beit"] = [
        "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BeitForImageClassification",
        "BeitForMaskedImageModeling",
        "BeitForSemanticSegmentation",
        "BeitModel",
        "BeitPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_beit"] = [
        "FlaxBeitForImageClassification",
        "FlaxBeitForMaskedImageModeling",
        "FlaxBeitModel",
        "FlaxBeitPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
from __future__ import annotations


class Node:
    """A binary tree node holding an integer payload."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
676
1
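Fullness requires every node to have zero or two children; a small demonstration using the `Node` class and helpers defined in the file above (assumes they are in scope):

root = Node(1)
root.left = Node(2)                    # right child missing -> not full
assert not is_full_binary_tree(root)
root.right = Node(3)                   # now every node has 0 or 2 children
assert is_full_binary_tree(root)
assert depth_of_tree(root) == 2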
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
676
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}


class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
676
1
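The `attribute_map` in the config above aliases the generic `hidden_size` / `num_attention_heads` names onto the DETR-style `d_model` / `encoder_attention_heads` fields. A small usage sketch (shrunk hyperparameters purely for illustration):

from transformers import TableTransformerConfig

config = TableTransformerConfig(d_model=128, encoder_layers=2, decoder_layers=2, num_queries=25)
print(config.hidden_size)          # 128 -- aliased to d_model via attribute_map
print(config.num_attention_heads)  # 8 -- aliased to encoder_attention_heads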