Dataset schema (five fields per row):

    field                    type     range
    code                     string   lengths 86 – 54.5k
    code_codestyle           int64    0 – 371
    style_context            string   lengths 87 – 49.2k
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
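As context for the schema above, here is a minimal sketch of how rows with these five fields could be loaded and inspected with the Hugging Face `datasets` library. The repository path is a hypothetical placeholder, not the dataset's actual name.

    # Minimal loading sketch; "user/code-style-pairs" is a placeholder id.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])  # first 200 characters of the code string

Given the 0–1 range of `label`, each row presumably carries a binary signal (e.g., whether `code` matches the style of `style_context`), though the dump itself does not say so. The sample rows follow in schema order; the block immediately below is row 1's code field.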
import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase_ : def __init__( self : str ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[Any]=13 ,__lowerCamelCase : int=32 ,__lowerCamelCase : List[str]=3 ,__lowerCamelCase : int=4 ,__lowerCamelCase : Tuple=[10, 20, 30, 40] ,__lowerCamelCase : Optional[int]=[2, 2, 3, 2] ,__lowerCamelCase : int=True ,__lowerCamelCase : Tuple=True ,__lowerCamelCase : Union[str, Any]=37 ,__lowerCamelCase : Dict="gelu" ,__lowerCamelCase : int=10 ,__lowerCamelCase : Any=0.02 ,__lowerCamelCase : Dict=["stage2", "stage3", "stage4"] ,__lowerCamelCase : Tuple=[2, 3, 4] ,__lowerCamelCase : Dict=None ,): '''simple docstring''' a = parent a = batch_size a = image_size a = num_channels a = num_stages a = hidden_sizes a = depths a = is_training a = use_labels a = intermediate_size a = hidden_act a = num_labels a = initializer_range a = out_features a = out_indices a = scope def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] ,self.num_labels ) a = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=__lowerCamelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : Optional[int] ): '''simple docstring''' a = ConvNextVaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict ,__lowerCamelCase : int ): '''simple docstring''' a = ConvNextVaForImageClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ,labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int] ): '''simple docstring''' a = ConvNextVaBackbone(config=__lowerCamelCase ) 
model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None a = None a = ConvNextVaBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = self.prepare_config_and_inputs() a = config_and_inputs a = {"""pixel_values""": pixel_values} return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = self.prepare_config_and_inputs() a = config_and_inputs a = {"""pixel_values""": pixel_values, """labels""": labels} return config, inputs_dict @require_torch class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = ( {'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = ConvNextVaModelTester(self ) a = ConfigTester(self ,config_class=__lowerCamelCase ,has_text_modality=__lowerCamelCase ,hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return @unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: a = self.model_tester.prepare_config_and_inputs_with_labels() a = True if model_class.__name__ in [ *get_values(__lowerCamelCase ), *get_values(__lowerCamelCase ), ]: continue a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.train() a = 
self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ,return_labels=__lowerCamelCase ) a = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: a = self.model_tester.prepare_config_and_inputs_with_labels() a = False a = True if ( model_class.__name__ in [*get_values(__lowerCamelCase ), *get_values(__lowerCamelCase )] or not model_class.supports_gradient_checkpointing ): continue a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.gradient_checkpointing_enable() model.train() a = self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ,return_labels=__lowerCamelCase ) a = model(**__lowerCamelCase ).loss loss.backward() def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCamelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["""pixel_values"""] self.assertListEqual(arg_names[:1] ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' def check_hidden_states_output(__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : int ): a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) ) a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states a = self.model_tester.num_stages self.assertEqual(len(__lowerCamelCase ) ,expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ConvNextVaModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( ) -> int: """simple docstring""" a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = 
ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(__lowerCamelCase ) a = self.default_image_processor a = prepare_img() a = preprocessor(images=__lowerCamelCase ,return_tensors='''pt''' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCamelCase ) # verify the logits a = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape ,__lowerCamelCase ) a = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCamelCase ,atol=1e-4 ) )
code_codestyle: 351

style_context:
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow

if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    @slow
    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
        a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
        a = '''The dog is cute and lives in the garden house'''
        a = jnp.array([tokenizer.encode(__lowerCamelCase )] )
        a = (1, 12, 7_68)  # batch_size, sequence_length, embedding_vector_dim
        a = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
        a = model(__lowerCamelCase )['''last_hidden_state''']
        self.assertEqual(output.shape ,__lowerCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
style_context_codestyle: 330
label: 0

Row 2 - code:
import math


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str:
    """simple docstring"""
    a = 0
    a = 0
    while num > 0:
        a = num % 8
        a = octal + (remainder * math.floor(math.pow(1_0, lowerCAmelCase__ ) ))
        counter += 1
        a = math.floor(num / 8 )  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"""0o{int(lowerCAmelCase__ )}"""


def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
    """simple docstring"""
    print('''\n2 in octal is:''' )
    print(decimal_to_octal(2 ) )  # = 2
    print('''\n8 in octal is:''' )
    print(decimal_to_octal(8 ) )  # = 10
    print('''\n65 in octal is:''' )
    print(decimal_to_octal(6_5 ) )  # = 101
    print('''\n216 in octal is:''' )
    print(decimal_to_octal(2_1_6 ) )  # = 330
    print('''\n512 in octal is:''' )
    print(decimal_to_octal(5_1_2 ) )  # = 1000
    print('''\n''' )


if __name__ == "__main__":
    main()
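A quick cross-check on this row's conversion routine: Python's standard library already exposes the same 0o-prefixed format through the built-in oct(), so the hand-rolled loop above can be verified against it; the expected values match the comments in the sample.

    # Built-in equivalent of the conversion above, for verification.
    assert oct(65) == "0o101"
    assert oct(216) == "0o330"
    assert oct(512) == "0o1000"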
code_codestyle: 352

style_context:
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ : Union[str, Any] = 16 UpperCamelCase__ : Dict = 32 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = 1_6 ) -> Tuple: """simple docstring""" a = AutoTokenizer.from_pretrained('''bert-base-cased''' ) a = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(snake_case_ ): # max_length=None => use the model max length (it's actually the default) a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a = datasets.map( snake_case_, batched=snake_case_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(snake_case_ ): # On TPU it's best to pad everything to the same length or training will be very slow. a = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a = 1_6 elif accelerator.mixed_precision != "no": a = 8 else: a = None return tokenizer.pad( snake_case_, padding='''longest''', max_length=snake_case_, pad_to_multiple_of=snake_case_, return_tensors='''pt''', ) # Instantiate dataloaders. 
a = DataLoader( tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) a = DataLoader( tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ : int = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''', snake_case_ ) == "1": a = 2 # Initialize accelerator a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config['''lr'''] a = int(config['''num_epochs'''] ) a = int(config['''seed'''] ) a = int(config['''batch_size'''] ) a = evaluate.load('''glue''', '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case_ ) def inner_training_loop(snake_case_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Instantiate optimizer a = AdamW(params=model.parameters(), lr=snake_case_ ) a , a = get_dataloaders(snake_case_, snake_case_ ) # Instantiate scheduler a = get_linear_schedule_with_warmup( optimizer=snake_case_, num_warmup_steps=1_0_0, num_training_steps=(len(snake_case_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a = model(**snake_case_ ) a = outputs.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): a = model(**snake_case_ ) a = outputs.logits.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case_, references=snake_case_, ) a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""", snake_case_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''', type=snake_case_, default=snake_case_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' ) a = parser.parse_args() a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(snake_case_, snake_case_ ) if __name__ == "__main__": main()
style_context_codestyle: 330
label: 0

Row 3 - code:
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

UpperCamelCase__ : List[Any] = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Union[str, Any] = ["""GPTNeoXTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Optional[Any] = [
        """GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTNeoXForCausalLM""",
        """GPTNeoXForQuestionAnswering""",
        """GPTNeoXForSequenceClassification""",
        """GPTNeoXForTokenClassification""",
        """GPTNeoXLayer""",
        """GPTNeoXModel""",
        """GPTNeoXPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )
else:
    import sys

    UpperCamelCase__ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 353

style_context:
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCamelCase__ : Optional[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(snake_case_, snake_case_ ) if weight_type is not None: a = getattr(snake_case_, snake_case_ ).shape else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue a = True if "*" in mapped_key: a = name.split(snake_case_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', snake_case_ ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]: """simple docstring""" if config_path is not None: a = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: a = UniSpeechSatConfig() a = '''''' if is_finetuned: a = 
UniSpeechSatForCTC(snake_case_ ) else: a = UniSpeechSatForPreTraining(snake_case_ ) a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a = model[0].eval() recursively_load_weights(snake_case_, snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase__ : int = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
style_context_codestyle: 330
label: 0

Row 4 - code:
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging

UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__)

UpperCamelCase__ : int = r"\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"


class lowerCamelCase_ ( lowercase_ ):
    @add_start_docstrings(a__ )
    def __call__( self : List[str] ,__lowerCamelCase : Dict ,__lowerCamelCase : List[str] ,**__lowerCamelCase : List[Any] ):
        '''simple docstring'''
        raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )


class lowerCamelCase_ ( lowercase_ ):
    def __init__( self : Tuple ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Optional[int] = None ):
        '''simple docstring'''
        a = max_length
        a = max_position_embeddings

    @add_start_docstrings(a__ )
    def __call__( self : Dict ,__lowerCamelCase : Dict ,__lowerCamelCase : str ,**__lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        a = input_ids.shape[-1]
        a = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                '''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
                F"""maximum length ({self.max_position_embeddings}). Depending on the model, you may observe """
                '''exceptions, performance degradation, or nothing at all.'''
            )
        return is_done


class lowerCamelCase_ ( lowercase_ ):
    def __init__( self : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Union[str, Any] ):
        '''simple docstring'''
        warnings.warn(
            '''The class `MaxNewTokensCriteria` is deprecated. '''
            F"""Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` """
            '''with `max_length = start_length + max_new_tokens` instead.''' ,a__ ,)
        a = start_length
        a = max_new_tokens
        a = start_length + max_new_tokens

    @add_start_docstrings(a__ )
    def __call__( self : Dict ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Dict ,**__lowerCamelCase : int ):
        '''simple docstring'''
        return input_ids.shape[-1] >= self.max_length


class lowerCamelCase_ ( lowercase_ ):
    def __init__( self : int ,__lowerCamelCase : List[str] ,__lowerCamelCase : Any = None ):
        '''simple docstring'''
        a = max_time
        a = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(a__ )
    def __call__( self : List[str] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        return time.time() - self.initial_timestamp > self.max_time


class lowerCamelCase_ ( lowercase_ ):
    @add_start_docstrings(a__ )
    def __call__( self : List[str] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : str ,**__lowerCamelCase : Optional[int] ):
        '''simple docstring'''
        return any(criteria(a__ ,a__ ) for criteria in self )

    @property
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        '''simple docstring'''
        for stopping_criterium in self:
            if isinstance(a__ ,a__ ):
                return stopping_criterium.max_length
            elif isinstance(a__ ,a__ ):
                return stopping_criterium.max_length
        return None


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Optional[int]:
    """simple docstring"""
    a = stopping_criteria.max_length
    a = deepcopy(snake_case_ )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''', snake_case_ )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=snake_case_ ) )
    return new_stopping_criteria
code_codestyle: 354

style_context:
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
    """simple docstring"""
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() )


@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
    """simple docstring"""

    class lowerCamelCase_ :
        def __init__( self : Dict ,__lowerCamelCase : List[str] ):
            '''simple docstring'''
            a = metric_id

    class lowerCamelCase_ :
        SCREAMING_SNAKE_CASE_ = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def SCREAMING_SNAKE_CASE_ ( self : Dict ):
            '''simple docstring'''
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() )


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))]
)
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple:
    """simple docstring"""
    if "tmp_path" in args:
        a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(snake_case_, match='''https://huggingface.co/docs/evaluate''' ):
        func(*snake_case_ )
style_context_codestyle: 330
label: 0

Row 5 - code:
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase__ : Any = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(snake_case_ )


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str:
    """simple docstring"""
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    a = terminalreporter.config.getoption('''--make-reports''' )
    if make_reports:
        pytest_terminal_summary_main(snake_case_, id=snake_case_ )
code_codestyle: 355

style_context:
from ...configuration_utils import PretrainedConfig
from ...utils import logging

UpperCamelCase__ : str = logging.get_logger(__name__)

UpperCamelCase__ : Optional[int] = {
    """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
    """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}


class lowerCamelCase_ ( a_ ):
    SCREAMING_SNAKE_CASE_ = 'luke'

    def __init__(
        self : Dict ,
        __lowerCamelCase : Optional[Any]=5_02_67 ,
        __lowerCamelCase : str=50_00_00 ,
        __lowerCamelCase : Any=7_68 ,
        __lowerCamelCase : int=2_56 ,
        __lowerCamelCase : Optional[int]=12 ,
        __lowerCamelCase : Tuple=12 ,
        __lowerCamelCase : Any=30_72 ,
        __lowerCamelCase : Any="gelu" ,
        __lowerCamelCase : Any=0.1 ,
        __lowerCamelCase : Tuple=0.1 ,
        __lowerCamelCase : Tuple=5_12 ,
        __lowerCamelCase : int=2 ,
        __lowerCamelCase : Optional[int]=0.02 ,
        __lowerCamelCase : List[Any]=1e-12 ,
        __lowerCamelCase : Dict=True ,
        __lowerCamelCase : Tuple=None ,
        __lowerCamelCase : Any=1 ,
        __lowerCamelCase : Dict=0 ,
        __lowerCamelCase : Any=2 ,
        **__lowerCamelCase : str ,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase )
        a = vocab_size
        a = entity_vocab_size
        a = hidden_size
        a = entity_emb_size
        a = num_hidden_layers
        a = num_attention_heads
        a = hidden_act
        a = intermediate_size
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = max_position_embeddings
        a = type_vocab_size
        a = initializer_range
        a = layer_norm_eps
        a = use_entity_aware_attention
        a = classifier_dropout
style_context_codestyle: 330
label: 0

Row 6 - code:
import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase_ ( lowerCamelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = CodeGenTokenizer SCREAMING_SNAKE_CASE_ = CodeGenTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = {'add_prefix_space': True} SCREAMING_SNAKE_CASE_ = False def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt a = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] a = dict(zip(__lowerCamelCase ,range(len(__lowerCamelCase ) ) ) ) a = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] a = {'''unk_token''': '''<unk>'''} a = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) a = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(__lowerCamelCase ) + '''\n''' ) with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__lowerCamelCase ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,**__lowerCamelCase : Optional[int] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,**__lowerCamelCase : Optional[int] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Dict ): '''simple docstring''' a = '''lower newer''' a = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) a = '''lower newer''' a = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] a = tokenizer.tokenize(__lowerCamelCase ,add_prefix_space=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) a = tokens + [tokenizer.unk_token] a = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return a = self.get_tokenizer() a = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase ) a = '''lower newer''' # Testing tokenization a = tokenizer.tokenize(__lowerCamelCase ,add_prefix_space=__lowerCamelCase ) a = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) # Testing conversion to ids without special tokens a = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,add_prefix_space=__lowerCamelCase ) a = rust_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) # Testing conversion to ids 
with special tokens a = self.get_rust_tokenizer(add_prefix_space=__lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ,add_prefix_space=__lowerCamelCase ) a = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) # Testing the unknown token a = tokens + [rust_tokenizer.unk_token] a = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Tuple=15 ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase ,**__lowerCamelCase ) # Simple input a = '''This is a simple input''' a = ['''This is a simple input 1''', '''This is a simple input 2'''] a = ('''This is a simple input''', '''This is a pair''') a = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowerCamelCase ,tokenizer_r.encode ,__lowerCamelCase ,max_length=__lowerCamelCase ,padding='''max_length''' ) # Simple input self.assertRaises(__lowerCamelCase ,tokenizer_r.encode_plus ,__lowerCamelCase ,max_length=__lowerCamelCase ,padding='''max_length''' ) # Simple input self.assertRaises( __lowerCamelCase ,tokenizer_r.batch_encode_plus ,__lowerCamelCase ,max_length=__lowerCamelCase ,padding='''max_length''' ,) # Pair input self.assertRaises(__lowerCamelCase ,tokenizer_r.encode ,__lowerCamelCase ,max_length=__lowerCamelCase ,padding='''max_length''' ) # Pair input self.assertRaises(__lowerCamelCase ,tokenizer_r.encode_plus ,__lowerCamelCase ,max_length=__lowerCamelCase ,padding='''max_length''' ) # Pair input self.assertRaises( __lowerCamelCase ,tokenizer_r.batch_encode_plus ,__lowerCamelCase ,max_length=__lowerCamelCase ,padding='''max_length''' ,) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token='''<pad>''' ) # Simple input a = '''This is a simple input''' a = ['''This is a simple input looooooooong''', '''This is a simple input'''] a = ('''This is a simple input''', '''This is a pair''') a = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] a = tokenizer.pad_token_id a = tokenizer(__lowerCamelCase ,padding='''max_length''' ,max_length=30 ,return_tensors='''np''' ) a = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,truncate=__lowerCamelCase ,return_tensors='''np''' ) a = tokenizer(*__lowerCamelCase ,padding='''max_length''' ,max_length=60 ,return_tensors='''np''' ) a = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,truncate=__lowerCamelCase ,return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] ,30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] ,33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding 
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] ,60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] ,52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = '''$$$''' a = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=__lowerCamelCase ,add_bos_token=__lowerCamelCase ) a = '''This is a simple input''' a = ['''This is a simple input 1''', '''This is a simple input 2'''] a = tokenizer.bos_token_id a = tokenizer(__lowerCamelCase ) a = tokenizer(__lowerCamelCase ) self.assertEqual(out_s.input_ids[0] ,__lowerCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) a = tokenizer.decode(out_s.input_ids ) a = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] ,__lowerCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) a = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' a = '''\nif len_a > len_b: result = a\nelse: result = b''' a = tokenizer.encode(__lowerCamelCase ) a = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] a = tokenizer.decode(__lowerCamelCase ,truncate_before_pattern=__lowerCamelCase ) self.assertEqual(__lowerCamelCase ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' pass
code_codestyle: 356

style_context:
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    UpperCamelCase__ : Optional[int] = pd.read_csv("""sample_data.csv""", header=None)
    UpperCamelCase__ : Tuple = df.shape[:1][0]
    # If you're using some other dataset input the target column
    UpperCamelCase__ : List[Any] = df.iloc[:, 1:2]
    UpperCamelCase__ : Union[str, Any] = actual_data.values.reshape(len_data, 1)
    UpperCamelCase__ : List[Any] = MinMaxScaler().fit_transform(actual_data)
    UpperCamelCase__ : Optional[Any] = 10
    UpperCamelCase__ : int = 5
    UpperCamelCase__ : List[str] = 20
    UpperCamelCase__ : Optional[int] = len_data - periods * look_back
    UpperCamelCase__ : Union[str, Any] = actual_data[:division]
    UpperCamelCase__ : str = actual_data[division - look_back :]
    UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = [], []
    UpperCamelCase__ , UpperCamelCase__ : str = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    UpperCamelCase__ : List[str] = np.array(train_x)
    UpperCamelCase__ : Optional[Any] = np.array(test_x)
    UpperCamelCase__ : Tuple = np.array([list(i.ravel()) for i in train_y])
    UpperCamelCase__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y])

    UpperCamelCase__ : Union[str, Any] = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    UpperCamelCase__ : Tuple = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    UpperCamelCase__ : Tuple = model.predict(x_test)
style_context_codestyle: 330
label: 0

Row 7 - code:
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCamelCase__ : Dict = logging.get_logger(__name__) UpperCamelCase__ : int = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } UpperCamelCase__ : Tuple = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" a = {} with open(UpperCAmelCase_, '''r''' ) as file: for line_number, line in enumerate(UpperCAmelCase_ ): a = line.strip() if line: a = line.split() a = line_number a = words[0] a = value return result def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> str: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(UpperCAmelCase_, UpperCAmelCase_ ) a = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase_ ): a = PARAM_MAPPING[full_name.split('''.''' )[-1]] a = 'param' if weight_type is not None and weight_type != "param": a = getattr(UpperCAmelCase_, UpperCAmelCase_ ).shape elif weight_type is not None and weight_type == "param": a = hf_pointer for attribute in hf_param_name.split('''.''' ): a = getattr(UpperCAmelCase_, UpperCAmelCase_ ) a = shape_pointer.shape # let's reduce dimension a = value[0] else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): a = getattr(UpperCAmelCase_, UpperCAmelCase_ ) a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(UpperCAmelCase_ ): a = PARAM_MAPPING[full_name.split('''.''' )[-1]] a = 'param' if weight_type is not None and weight_type != "param": a = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": a = '.'.join([key, hf_param_name] ) else: a = key a = value if 'lm_head' in full_key else value[0] UpperCamelCase__ : str = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None ) -> Optional[Any]: """simple docstring""" a = False for key, mapped_key in MAPPING.items(): a = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: a = True if "*" in mapped_key: a = name.split(UpperCAmelCase_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', UpperCAmelCase_ ) if "weight_g" in name: a = 'weight_g' elif "weight_v" in name: a = 'weight_v' elif "bias" in name: a = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = 'weight' else: a = None if hf_dict is not None: rename_dict(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) else: set_recursively(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) return is_used return is_used def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: a = load_wavaveca_layer(UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ ) if not is_used: unused_weights.append(UpperCAmelCase_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size 
{value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(UpperCAmelCase_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True, snake_case_=False ) -> Tuple: """simple docstring""" if config_path is not None: a = WavaVecaConfig.from_pretrained(UpperCAmelCase_ ) else: a = WavaVecaConfig() if is_seq_class: a = read_txt_into_dict(UpperCAmelCase_ ) a = idalabel a = WavaVecaForSequenceClassification(UpperCAmelCase_ ) a = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=UpperCAmelCase_, return_attention_mask=UpperCAmelCase_, ) feature_extractor.save_pretrained(UpperCAmelCase_ ) elif is_finetuned: if dict_path: a = Dictionary.load(UpperCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq a = target_dict.pad_index a = target_dict.bos_index a = target_dict.eos_index a = len(target_dict.symbols ) a = os.path.join(UpperCAmelCase_, '''vocab.json''' ) if not os.path.isdir(UpperCAmelCase_ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(UpperCAmelCase_ ) ) return os.makedirs(UpperCAmelCase_, exist_ok=UpperCAmelCase_ ) a = target_dict.indices # fairseq has the <pad> and <s> switched a = 0 a = 1 with open(UpperCAmelCase_, '''w''', encoding='''utf-8''' ) as vocab_handle: json.dump(UpperCAmelCase_, UpperCAmelCase_ ) a = WavaVecaCTCTokenizer( UpperCAmelCase_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=UpperCAmelCase_, ) a = True if config.feat_extract_norm == 'layer' else False a = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=UpperCAmelCase_, return_attention_mask=UpperCAmelCase_, ) a = WavaVecaProcessor(feature_extractor=UpperCAmelCase_, tokenizer=UpperCAmelCase_ ) processor.save_pretrained(UpperCAmelCase_ ) a = WavaVecaForCTC(UpperCAmelCase_ ) else: a = WavaVecaForPreTraining(UpperCAmelCase_ ) if is_finetuned or is_seq_class: a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: a = argparse.Namespace(task='''audio_pretraining''' ) a = fairseq.tasks.setup_task(UpperCAmelCase_ ) a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=UpperCAmelCase_ ) a = model[0].eval() recursively_load_weights(UpperCAmelCase_, UpperCAmelCase_, not is_finetuned ) hf_wavavec.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": UpperCamelCase__ : Any = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") 
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) parser.add_argument( """--is_seq_class""", action="""store_true""", help="""Whether the model to convert is a fine-tuned sequence classification model or not""", ) UpperCamelCase__ : Optional[Any] = parser.parse_args() UpperCamelCase__ : List[str] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        # the second lock must have waited at least `timeout` before giving up
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    # lock file names longer than 255 characters are shortened to a valid path
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
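A minimal sketch of the FileLock API exercised by these tests, using the same `datasets.utils.filelock` import; the lock path is illustrative:

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("/tmp/example.lock")  # illustrative path
try:
    with lock.acquire(timeout=1):
        # critical section: only one process holds the lock here
        pass
except Timeout:
    print("another process is holding the lock")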
import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": UpperCamelCase__ : Tuple = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) UpperCamelCase__ : Dict = parser.parse_args() UpperCamelCase__ : str = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
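A minimal sketch of reloading the result (not part of the script above), assuming a standard diffusers pipeline was saved to `--dump_path`; the path and prompt are placeholders:

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("path/to/dump_path", torch_dtype=torch.float16)  # hypothetical path
pipe = pipe.to("cuda")
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("astronaut.png")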
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
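A minimal sketch: the defaults above are documented to mirror the facebook/vit-mae-base architecture, so a randomly initialized model can be built straight from a default config:

from transformers import ViTMAEConfig, ViTMAEModel

config = ViTMAEConfig()  # defaults documented to mirror facebook/vit-mae-base
model = ViTMAEModel(config)  # randomly initialized, no pretrained weights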
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) UpperCamelCase__ : Dict = logging.getLogger() def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = argparse.ArgumentParser() parser.add_argument('''-f''' ) a = parser.parse_args() return args.f class lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ ): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = logging.StreamHandler(sys.stdout ) logger.addHandler(A__ ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : Dict ): '''simple docstring''' a = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 ,'''run_glue_deebert.py''' ) with patch.object(A__ ,'''argv''' ,A__ ): a = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(A__ ,0.666 ) @slow @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(A__ ) a = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(A__ ) a = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(A__ )
def stooge_sort(arr):
    """Sorts ``arr`` in place with stooge sort and returns it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
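The running time satisfies T(n) = 3 T(2n/3) + O(1), i.e. O(n^(log 3 / log 1.5)) ≈ O(n^2.71), so stooge sort is strictly a teaching algorithm. A quick sanity check against the module above (illustrative values):

assert stooge_sort([18, 2, 9, -7, 3]) == [-7, 2, 3, 9, 18]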
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is zero."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
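A quick sketch against the module above, with illustrative points: the first triple lies on the x-axis, the second does not lie on one line:

assert are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0))
assert not are_collinear((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (1.0, 1.0, 0.0))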
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCamelCase__ : Union[str, Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCamelCase__ : str = { """jukebox""": 512, } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = version a = max_n_lyric_tokens a = n_genres with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: a = oov.replace(r'''\-\'''' ,r'''\-+\'''' ) a = regex.compile(__lowerCamelCase ) a = {v: k for k, v in self.artists_encoder.items()} a = {v: k for k, v in self.genres_encoder.items()} a = {v: k for k, v in self.lyrics_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists] for genres in range(len(__lowerCamelCase ) ): a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]] a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str] ): '''simple docstring''' return list(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' a , a , a = self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = self._tokenize(__lowerCamelCase ) return artist, genre, lyrics def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": a = artists[idx].lower() a = [genres[idx].lower()] else: a = self._normalize(artists[idx] ) + '''.v2''' a = [ self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )} a = 0 a = len(__lowerCamelCase ) + 1 a = self.vocab a = {v: k for k, v in self.vocab.items()} a = '''''' else: a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) a = self._run_strip_accents(__lowerCamelCase ) a = lyrics.replace('''\\''' ,'''\n''' ) a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], [] return artists, genres, lyrics def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ): '''simple docstring''' a = unicodedata.normalize('''NFD''' ,__lowerCamelCase ) a = [] for char in text: a = unicodedata.category(__lowerCamelCase ) if cat == "Mn": continue output.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = ( [chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )] + ['''.'''] ) a = frozenset(__lowerCamelCase ) a = re.compile(r'''_+''' ) a = ''''''.join([c if c in accepted else '''_''' for c 
in text.lower()] ) a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' ) return text def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ): '''simple docstring''' return " ".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): a = TensorType(__lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf a = tf.constant a = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch a = torch.tensor a = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 a = jnp.array a = _is_jax else: a = np.asarray a = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: a = [inputs] if not is_tensor(__lowerCamelCase ): a = as_tensor(__lowerCamelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ): '''simple docstring''' a = [0, 0, 0] a = [artist] * len(self.version ) a = [genres] * len(self.version ) a , a , a = self.tokenize(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = [-INFINITY] * len(full_tokens[-1] ) a = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def SCREAMING_SNAKE_CASE_ ( self : 
Any ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : str ): '''simple docstring''' a = self.artists_decoder.get(__lowerCamelCase ) a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index] a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    Adapter for logging in multiprocess runs: `log` takes an extra `main_process_only`
    kwarg controlling whether the record is emitted on every process or only the main one.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Log either unconditionally, or only when this is the main process
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` "
                "or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                # Emit on every process, one rank at a time
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name, log_level=None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
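A minimal usage sketch for the adapter above; the shared state must be initialized (e.g. via `Accelerator()`) before logging:

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes the shared state the adapter checks for
logger = get_logger(__name__)

logger.info("printed once, on the main process only")
logger.info("printed by every process, rank by rank", main_process_only=False, in_order=True)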
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
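A minimal sketch (not part of the script) that reloads the checkpoint just saved and runs a dummy generation; the output is gibberish by construction, which is fine for machinery tests:

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tok = FSMTTokenizer.from_pretrained(mname_tiny)  # local folder written above
mdl = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
out = mdl.generate(**tok(["Making tiny model"], return_tensors="pt"), max_length=8)
print(tok.batch_decode(out, skip_special_tokens=True))  # meaningless tokens: the model is untrained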
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCamelCase_ ( a_ , unittest.TestCase ): # TODO: is there an appropriate internal test set? SCREAMING_SNAKE_CASE_ = 'ssube/stable-diffusion-x4-upscaler-onnx' def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : Union[str, Any]=0 ): '''simple docstring''' a = floats_tensor((1, 3, 1_28, 1_28) ,rng=random.Random(__a ) ) a = torch.manual_seed(__a ) a = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__a ) a = self.get_dummy_inputs() a = pipe(**__a ).images a = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' ) a = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=__a ) pipe.set_progress_bar_config(disable=__a ) a = self.get_dummy_inputs() a = pipe(**__a ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' ) a = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) a = self.get_dummy_inputs() a = pipe(**__a ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' ) a = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) a = self.get_dummy_inputs() a = pipe(**__a ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' a = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='''CPUExecutionProvider''' ) a = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__a ) a = self.get_dummy_inputs() a = pipe(**__a ).images a = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array( [0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCamelCase_ ( unittest.TestCase ): @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = ort.SessionOptions() a = False return options def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) a = init_image.resize((1_28, 1_28) ) # using the PNDM scheduler by default a = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=__a ) a = '''A fantasy landscape, trending on artstation''' a = torch.manual_seed(0 ) a = pipe( prompt=__a ,image=__a ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=__a ,output_type='''np''' ,) a = output.images a = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) a = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) a = init_image.resize((1_28, 1_28) ) a = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' ,subfolder='''scheduler''' ) a = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' ,scheduler=__a ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=__a ) a = '''A fantasy landscape, trending on artstation''' a = torch.manual_seed(0 ) a = pipe( prompt=__a ,image=__a ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=__a ,output_type='''np''' ,) a = output.images a = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 5_12, 3) a = np.array( [0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) UpperCamelCase__ : Optional[Any] = """bert-base-cased""" UpperCamelCase__ : int = """fp16""" UpperCamelCase__ : str = """bf16""" UpperCamelCase__ : List[Any] = [FPaa, BFaa] @require_fsdp @require_cuda class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' super().setUp() a = dict( ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = F"""{i + 1}""" a = strategy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = prefetch_policy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = state_dict_type with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = AutoModel.from_pretrained(__lowerCamelCase ) for policy in FSDP_AUTO_WRAP_POLICY: a = self.dist_env.copy() a = policy if policy == "TRANSFORMER_BASED_WRAP": a = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": a = '''2000''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) a = self.dist_env.copy() a = '''TRANSFORMER_BASED_WRAP''' a = '''T5Layer''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCamelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertTrue('''Could not find the transformer layer class to wrap in 
the model.''' in str(cm.exception ) ) a = self.dist_env.copy() a = '''SIZE_BASED_WRAP''' a = '''0''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: a = self.dist_env.copy() a = mp_dtype with mockenv_context(**__lowerCamelCase ): a = Accelerator() if mp_dtype == "fp16": a = torch.floataa elif mp_dtype == "bf16": a = torch.bfloataa a = MixedPrecision(param_dtype=__lowerCamelCase ,reduce_dtype=__lowerCamelCase ,buffer_dtype=__lowerCamelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCamelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,__lowerCamelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: a = self.dist_env.copy() a = str(__lowerCamelCase ).lower() with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCamelCase ) ) @require_fsdp @require_multi_gpu @slow class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' super().setUp() a = 0.82 a = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] a = { '''multi_gpu_fp16''': 32_00, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00, '''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } a = 1_60 a = 1_60 a = inspect.getfile(accelerate.test_utils ) a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_performance.py''' ) a = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: a = cmd.copy() for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(__lowerCamelCase ): a = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue a = len(__lowerCamelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: a = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) a = cmd_config[:-1] a = os.path.join(self.tmpdir ,'''epoch_0''' ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): a = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in spec: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: 
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
"""simple docstring""" from cva import destroyAllWindows, imread, imshow, waitKey def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[Any]: """simple docstring""" a , a = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(UpperCAmelCase__ ): for j in range(UpperCAmelCase__ ): a = [2_5_5, 2_5_5, 2_5_5] - img[i][j] return img if __name__ == "__main__": # read original image UpperCamelCase__ : Any = imread("""image_data/lena.jpg""", 1) # convert to its negative UpperCamelCase__ : List[str] = convert_to_negative(img) # show result image imshow("""negative of original image""", img) waitKey(0) destroyAllWindows()
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph; edges are stored with sorted endpoints."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree by repeatedly adding the cheapest edge crossing the cut."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # exactly one endpoint already in the subgraph => edge crosses the cut
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    # read the lower triangle; "-" marks a missing edge
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
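A small sketch exercising the class above with an illustrative three-vertex graph; Prim's keeps the two cheapest edges (0,1) and (1,2):

g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
mst = g.prims_algorithm()
assert sum(mst.edges.values()) == 3  # edges (0,1) and (1,2)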
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring UpperCamelCase__ : List[Any] = """RegNetConfig""" # Base docstring UpperCamelCase__ : Dict = """facebook/regnet-y-040""" UpperCamelCase__ : int = [1, 1_088, 7, 7] # Image classification docstring UpperCamelCase__ : Optional[Any] = """facebook/regnet-y-040""" UpperCamelCase__ : Dict = """tabby, tabby cat""" UpperCamelCase__ : Dict = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) a = ACTaFN[activation] if activation is not None else tf.identity def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ): '''simple docstring''' a = self.convolution(self.padding(__lowerCamelCase ) ) a = self.normalization(__lowerCamelCase ) a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config.num_channels a = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = shape_list(__lowerCamelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) ) a = self.embedder(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ): '''simple docstring''' return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) a = [ tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ), ] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = self.pooler(__lowerCamelCase ) for layer_module in self.attention: a = layer_module(__lowerCamelCase ) a = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer a = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ), *[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ): '''simple docstring''' for layer_module in self.layers: a = layer_module(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) ) a = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in 
enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ): '''simple docstring''' a = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a = hidden_states + (hidden_state,) a = stage_module(__lowerCamelCase ) if output_hidden_states: a = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): SCREAMING_SNAKE_CASE_ = RegNetConfig def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' ) a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) @unpack_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase ) a = self.encoder( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = encoder_outputs[0] a = self.pooler(__lowerCamelCase ) # Change to NCHW output format have uniformity in the modules a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = RegNetConfig SCREAMING_SNAKE_CASE_ = 'regnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} UpperCamelCase__ : Union[str, Any] = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase__ : List[str] = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , a_ , ) class lowerCamelCase_ ( a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , a_ , ) class lowerCamelCase_ ( a_ , a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = config.num_labels a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) # classification head a = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier[0](__lowerCamelCase ) a = self.classifier[1](__lowerCamelCase ) a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
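The file above mirrors the TensorFlow RegNet implementation in transformers. A minimal inference sketch through the public API could look like the following; the checkpoint name "facebook/regnet-y-040" and the local image path are assumptions, not part of the file.

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

# Checkpoint id and image path are placeholders.
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

image = Image.open("example.jpg")  # any RGB image
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape: (1, num_labels)
predicted = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted])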
330
0
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a, input_b) -> float:
    """simple docstring"""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset, value_array) -> list[list[list[float] | float]]:
    """simple docstring"""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a, input_b) -> float:
    """simple docstring"""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
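A quick demo of the three helpers above; the sample arrays are illustrative values only.

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.1, 0.1]])

# Nearest dataset vector and its euclidean distance for each query row.
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 0.1414...]]
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.707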
365
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'efficientformer' def __init__( self : Optional[int] ,__lowerCamelCase : List[int] = [3, 2, 6, 4] ,__lowerCamelCase : List[int] = [48, 96, 2_24, 4_48] ,__lowerCamelCase : List[bool] = [True, True, True, True] ,__lowerCamelCase : int = 4_48 ,__lowerCamelCase : int = 32 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : int = 7 ,__lowerCamelCase : int = 5 ,__lowerCamelCase : int = 8 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 16 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : bool = True ,__lowerCamelCase : bool = True ,__lowerCamelCase : float = 1e-5 ,__lowerCamelCase : str = "gelu" ,__lowerCamelCase : float = 0.02 ,__lowerCamelCase : float = 1e-12 ,__lowerCamelCase : int = 2_24 ,__lowerCamelCase : float = 1e-05 ,**__lowerCamelCase : Dict ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_act a = hidden_dropout_prob a = hidden_sizes a = num_hidden_layers a = num_attention_heads a = initializer_range a = layer_norm_eps a = patch_size a = num_channels a = depths a = mlp_expansion_ratio a = downsamples a = dim a = key_dim a = attention_ratio a = resolution a = pool_size a = downsample_patch_size a = downsample_stride a = downsample_pad a = drop_path_rate a = num_metaad_blocks a = distillation a = use_layer_scale a = layer_scale_init_value a = image_size a = batch_norm_eps
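A short sketch instantiating this configuration through its public transformers name, EfficientFormerConfig; the argument values simply repeat the defaults listed in __init__ above.

from transformers import EfficientFormerConfig

config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
print(config.model_type)  # "efficientformer"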
330
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = {"vocab_file": "spiece.model"} UpperCamelCase__ : List[Any] = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } UpperCamelCase__ : Dict = {"bert_for_seq_generation": 512} class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : List[Any]="<s>" ,__lowerCamelCase : Any="</s>" ,__lowerCamelCase : Dict="<unk>" ,__lowerCamelCase : Union[str, Any]="<pad>" ,__lowerCamelCase : Any="<::::>" ,__lowerCamelCase : str = None ,**__lowerCamelCase : Union[str, Any] ,): '''simple docstring''' a = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__lowerCamelCase ,) a = vocab_file a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' return self.sp_model.get_piece_size() def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' a = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ): '''simple docstring''' a = self.__dict__.copy() a = None return state def __setstate__( self : Dict ,__lowerCamelCase : str ): '''simple docstring''' a = d # for backward compatibility if not hasattr(self ,'''sp_model_kwargs''' ): a = {} a = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : Any ): '''simple docstring''' return self.sp_model.encode(__lowerCamelCase ,out_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : int ): '''simple docstring''' return self.sp_model.piece_to_id(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ): '''simple docstring''' a = self.sp_model.IdToPiece(__lowerCamelCase ) return token def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : List[str] ): '''simple docstring''' a = [] a = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__lowerCamelCase ) + token a = [] else: current_sub_tokens.append(__lowerCamelCase ) out_string += self.sp_model.decode(__lowerCamelCase ) return out_string.strip() def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : str = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( 
__lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__lowerCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__lowerCamelCase ,'''wb''' ) as fi: a = self.sp_model.serialized_model_proto() fi.write(__lowerCamelCase ) return (out_vocab_file,)
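This file corresponds to the BertGenerationTokenizer in transformers. A hedged loading sketch follows; the checkpoint id is taken from the vocabulary map above, and the sentencepiece package must be installed.

from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
print(tok.tokenize("Hello world"))  # sentencepiece sub-word pieces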
366
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase__ : Any = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCamelCase__ : Optional[Any] = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCamelCase__ : Optional[Any] = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : List[str] = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : Optional[int] = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for tf_name, hf_name in patterns: a = k.replace(snake_case_, snake_case_ ) return k def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" a = BigBirdPegasusConfig(**snake_case_ ) a = BigBirdPegasusForConditionalGeneration(snake_case_ ) a = torch_model.state_dict() a = {} # separating decoder weights a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = DECODER_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = REMAINING_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a = mapping['''model.embed_positions.weight'''] a = mapping.pop('''model.embed_positions.weight''' ) a , a = torch_model.load_state_dict(snake_case_, strict=snake_case_ ) a = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict: """simple docstring""" a = tf.train.list_variables(snake_case_ ) a = {} a = ['''global_step'''] for name, shape in tqdm(snake_case_, desc='''converting tf checkpoint to dict''' ): a = any(pat in name for pat in ignore_name ) if skip_key: continue a = tf.train.load_variable(snake_case_, snake_case_ ) a = array return tf_weights def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int: """simple docstring""" a = get_tf_weights_as_numpy(snake_case_ ) a = convert_bigbird_pegasus(snake_case_, snake_case_ ) torch_model.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCamelCase__ : int = parser.parse_args() UpperCamelCase__ : Tuple = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
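A small illustration of how rename_state_dict_key applies the pattern tables, assuming the pattern lists carry the names referenced inside the converter (REMAINING_PATTERNS and friends). The TF-style key below is hypothetical and only shows the mechanics.

# Hypothetical TF checkpoint key; each (tf_name, hf_name) pair is applied in order.
example_key = "pegasus/encoder/layer_0/fc1/kernel"
print(rename_state_dict_key(example_key, REMAINING_PATTERNS))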
330
0
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

UpperCamelCase__ = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """simple docstring"""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(
            f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}"
        )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """simple docstring"""
    return compare_versions(UpperCamelCase__, operation, version)
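A usage sketch for the two helpers above, assuming STR_OPERATION_TO_FUNC maps comparison strings such as ">=" to functions from the operator module.

# Gate a code path on the installed torch version.
if compare_versions("torch", ">=", "1.12.0"):
    print("torch satisfies >= 1.12.0")

if is_torch_version("<", "2.0.0"):
    print("running on a pre-2.0 torch")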
367
import re


def SCREAMING_SNAKE_CASE__(dna: str) -> str:
    """simple docstring"""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
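A quick check of the complement function above.

print(SCREAMING_SNAKE_CASE__("ATCG"))  # -> "TAGC"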
330
0
"""simple docstring""" from manim import * class lowerCamelCase_ ( UpperCamelCase__ ): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = Rectangle(height=0.5 ,width=0.5 ) a = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 ) a = Rectangle(height=0.25 ,width=0.25 ) a = [mem.copy() for i in range(6 )] a = [mem.copy() for i in range(6 )] a = VGroup(*__a ).arrange(__a ,buff=0 ) a = VGroup(*__a ).arrange(__a ,buff=0 ) a = VGroup(__a ,__a ).arrange(__a ,buff=0 ) a = Text('''CPU''' ,font_size=24 ) a = Group(__a ,__a ).arrange(__a ,buff=0.5 ,aligned_edge=__a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__a ) a = [mem.copy() for i in range(4 )] a = VGroup(*__a ).arrange(__a ,buff=0 ) a = Text('''GPU''' ,font_size=24 ) a = Group(__a ,__a ).arrange(__a ,buff=0.5 ,aligned_edge=__a ) gpu.move_to([-1, -1, 0] ) self.add(__a ) a = [mem.copy() for i in range(6 )] a = VGroup(*__a ).arrange(__a ,buff=0 ) a = Text('''Model''' ,font_size=24 ) a = Group(__a ,__a ).arrange(__a ,buff=0.5 ,aligned_edge=__a ) model.move_to([3, -1.0, 0] ) self.add(__a ) a = [] a = [] for i, rect in enumerate(__a ): a = fill.copy().set_fill(__a ,opacity=0.8 ) target.move_to(__a ) model_arr.append(__a ) a = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(__a ,opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(__a ) self.add(*__a ,*__a ) a = [meta_mem.copy() for i in range(6 )] a = [meta_mem.copy() for i in range(6 )] a = VGroup(*__a ).arrange(__a ,buff=0 ) a = VGroup(*__a ).arrange(__a ,buff=0 ) a = VGroup(__a ,__a ).arrange(__a ,buff=0 ) a = Text('''Disk''' ,font_size=24 ) a = Group(__a ,__a ).arrange(__a ,buff=0.5 ,aligned_edge=__a ) disk.move_to([-4, -1.25, 0] ) self.add(__a ,__a ) a = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) a = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" ,font_size=18 ,) key_text.move_to([-5, 2.4, 0] ) self.add(__a ,__a ) a = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" ,font_size=18 ,) blue_text.next_to(__a ,DOWN * 2.4 ,aligned_edge=key_text.get_left() ) self.add(__a ) a = MarkupText( F"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" ,font_size=24 ,) step_a.move_to([2, 2, 0] ) self.play(Write(__a ) ) a = Square(0.3 ) input.set_fill(__a ,opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] ,__a ,buff=0.5 ) self.play(Write(__a ) ) input.generate_target() input.target.next_to(model_arr[0] ,direction=__a ,buff=0.02 ) self.play(MoveToTarget(__a ) ) self.play(FadeOut(__a ) ) a = Arrow(start=__a ,end=__a ,color=__a ,buff=0.5 ) a.next_to(model_arr[0].get_left() ,__a ,buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) a = MarkupText( F"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" ,font_size=24 ,) step_a.move_to([2, 2, 0] ) self.play(Write(__a ,run_time=3 ) ) a = {'''run_time''': 1, '''fade_in''': True, '''fade_out''': True, '''buff''': 0.02} self.play( Write(__a ) ,Circumscribe(model_arr[0] ,color=__a ,**__a ) ,Circumscribe(model_cpu_arr[0] ,color=__a ,**__a ) ,Circumscribe(gpu_rect[0] ,color=__a ,**__a ) ,) self.play(MoveToTarget(model_cpu_arr[0] ) ) a = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 ,__a ,buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) a = AnimationGroup( FadeOut(__a ,run_time=0.5 ) ,MoveToTarget(__a ,run_time=0.5 ) 
,FadeIn(__a ,run_time=0.5 ) ,lag_ratio=0.2 ) self.play(__a ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: a = 0.7 self.play( Circumscribe(model_arr[i] ,**__a ) ,Circumscribe(cpu_left_col_base[i] ,**__a ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=__a ,**__a ) ,Circumscribe(gpu_rect[0] ,color=__a ,**__a ) ,Circumscribe(model_arr[i + 1] ,color=__a ,**__a ) ,) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,) else: self.play( MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 ) self.play( Circumscribe(model_arr[-1] ,color=__a ,**__a ) ,Circumscribe(cpu_left_col_base[-1] ,color=__a ,**__a ) ,Circumscribe(gpu_rect[0] ,color=__a ,**__a ) ,) self.play(MoveToTarget(model_cpu_arr[i] ) ) a = a_c a = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 ) self.play( FadeOut(__a ) ,FadeOut(__a ,run_time=0.5 ) ,) a = MarkupText(F"""Inference on a model too large for GPU memory\nis successfully completed.""" ,font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__a ,run_time=3 ) ,MoveToTarget(__a ) ) self.wait()
368
from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count += 1 a = '''_''' if count > 1: return False else: return "".join(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]: """simple docstring""" a = [] while True: a = ['''$'''] * len(snake_case_ ) a = [] for i in range(len(snake_case_ ) ): for j in range(i + 1, len(snake_case_ ) ): a = compare_string(binary[i], binary[j] ) if k is False: a = '''*''' a = '''*''' temp.append('''X''' ) for i in range(len(snake_case_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case_ ) == 0: return pi a = list(set(snake_case_ ) ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] for minterm in minterms: a = '''''' for _ in range(snake_case_ ): a = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case_ ) return temp def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] a = [0] * len(snake_case_ ) for i in range(len(chart[0] ) ): a = 0 a = -1 for j in range(len(snake_case_ ) ): if chart[j][i] == 1: count += 1 a = j if count == 1: a = 1 for i in range(len(snake_case_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case_ ) ): a = 0 temp.append(prime_implicants[i] ) while True: a = 0 a = -1 a = 0 for i in range(len(snake_case_ ) ): a = chart[i].count(1 ) if count_n > max_n: a = count_n a = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case_ ) ): a = 0 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]: """simple docstring""" a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )] for i in range(len(snake_case_ ) ): a = prime_implicants[i].count('''_''' ) for j in range(len(snake_case_ ) ): if is_for_table(prime_implicants[i], binary[j], snake_case_ ): a = 1 return chart def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" a = int(input('''Enter the no. of variables\n''' ) ) a = [ float(snake_case_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] a = decimal_to_binary(snake_case_, snake_case_ ) a = check(snake_case_ ) print('''Prime Implicants are:''' ) print(snake_case_ ) a = prime_implicant_chart(snake_case_, snake_case_ ) a = selection(snake_case_, snake_case_ ) print('''Essential Prime Implicants are:''' ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
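A non-interactive sketch of the flow in main() above, assuming the helpers carry the names used there (decimal_to_binary, check, prime_implicant_chart, selection) and behave as main() expects; three variables and the minterms [1, 4, 7] are illustrative values.

binary = decimal_to_binary(3, [1, 4, 7])
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))  # essential prime implicants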
330
0
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class lowerCamelCase_ : def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Dict ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' raise NotImplementedError() class lowerCamelCase_ ( _UpperCamelCase ): def __init__( self : Optional[int] ,__lowerCamelCase : "AutoTokenizer" ,__lowerCamelCase : bool = False ,**__lowerCamelCase : int ): '''simple docstring''' a = tokenizer a = skip_prompt a = decode_kwargs # variables used in the streaming process a = [] a = 0 a = True def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : Any ): '''simple docstring''' if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: a = value[0] if self.skip_prompt and self.next_tokens_are_prompt: a = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) a = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): a = text[self.print_len :] a = [] a = 0 # If the last token is a CJK character, we print the characters. elif len(_UpperCAmelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): a = text[self.print_len :] self.print_len += len(_UpperCAmelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: a = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(_UpperCAmelCase ) self.on_finalized_text(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' if len(self.token_cache ) > 0: a = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) a = text[self.print_len :] a = [] a = 0 else: a = '' a = True self.on_finalized_text(_UpperCAmelCase ,stream_end=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' print(_UpperCAmelCase ,flush=_UpperCAmelCase ,end='''''' if not stream_end else None ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Any ): '''simple docstring''' if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False class lowerCamelCase_ ( _UpperCamelCase ): def __init__( self : Any ,__lowerCamelCase : "AutoTokenizer" ,__lowerCamelCase : bool = False ,__lowerCamelCase : Optional[float] = None ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase ) a = Queue() a = None a = timeout def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' self.text_queue.put(_UpperCAmelCase ,timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal ,timeout=self.timeout ) def __iter__( self : Optional[int] ): '''simple docstring''' return self def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = 
self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
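The second class above is the iterator-based streamer exposed by transformers as TextIteratorStreamer. A typical (hedged) consumption pattern follows, with "gpt2" as a stand-in model id: generation runs in a background thread while the main thread iterates over decoded text chunks.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["A short prompt"], return_tensors="pt")

streamer = TextIteratorStreamer(tok, skip_prompt=True)
thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:  # yields decoded text chunks as they are generated
    print(new_text, end="")
thread.join()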
369
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCamelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a_ ) class lowerCamelCase_ ( a_ ): def __init__( self : int ,*__lowerCamelCase : str ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(*__lowerCamelCase ,**__lowerCamelCase ) requires_backends(self ,'''vision''' ) self.check_model_type(__lowerCamelCase ) def __call__( self : int ,__lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**__lowerCamelCase : str ): '''simple docstring''' return super().__call__(__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,**__lowerCamelCase : Dict ): '''simple docstring''' return {}, {}, {} def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = load_image(__lowerCamelCase ) a = image.size a = self.image_processor(images=__lowerCamelCase ,return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = self.model(**__lowerCamelCase ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = model_outputs.predicted_depth a = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=__lowerCamelCase ) a = prediction.squeeze().cpu().numpy() a = (output * 2_55 / np.max(__lowerCamelCase )).astype('''uint8''' ) a = Image.fromarray(__lowerCamelCase ) a = {} a = predicted_depth a = depth return output_dict
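A hedged end-to-end sketch of this pipeline via the public API; the model id and the image URL are assumptions.

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# "depth" is a PIL image, "predicted_depth" the raw tensor (see postprocess above).
print(out["depth"].size, out["predicted_depth"].shape)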
330
0
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase_ : def __init__( self : Tuple ,__lowerCamelCase : List[Any] ,__lowerCamelCase : int=13 ,__lowerCamelCase : Tuple=7 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Union[str, Any]=True ,__lowerCamelCase : Tuple=True ,__lowerCamelCase : int=99 ,__lowerCamelCase : List[Any]=24 ,__lowerCamelCase : List[str]=2 ,__lowerCamelCase : Union[str, Any]=6 ,__lowerCamelCase : Dict=37 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Dict=0.1 ,__lowerCamelCase : Optional[Any]=0.1 ,__lowerCamelCase : Dict=5_12 ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : Dict=2 ,__lowerCamelCase : Any=0.02 ,__lowerCamelCase : Dict=3 ,__lowerCamelCase : Any=None ,__lowerCamelCase : Union[str, Any]=10_00 ,): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = scope a = range_bbox def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) a = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: a = bbox[i, j, 3] a = bbox[i, j, 1] a = t if bbox[i, j, 2] < bbox[i, j, 0]: a = bbox[i, j, 2] a = bbox[i, j, 0] a = t a = None if self.use_input_mask: a = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) a = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : int ,__lowerCamelCase : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : Optional[int] ,): '''simple 
docstring''' a = LiltModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() a = model(UpperCamelCase__ ,bbox=UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ) a = model(UpperCamelCase__ ,bbox=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ) a = model(UpperCamelCase__ ,bbox=UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Dict ,__lowerCamelCase : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict ,__lowerCamelCase : Optional[int] ,): '''simple docstring''' a = self.num_labels a = LiltForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() a = model( UpperCamelCase__ ,bbox=UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : Tuple ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,): '''simple docstring''' a = LiltForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() a = model( UpperCamelCase__ ,bbox=UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,token_type_ids=UpperCamelCase__ ,start_positions=UpperCamelCase__ ,end_positions=UpperCamelCase__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = self.prepare_config_and_inputs() ( a ) = config_and_inputs a = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class lowerCamelCase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = ( { """feature-extraction""": LiltModel, """question-answering""": LiltForQuestionAnswering, """text-classification""": LiltForSequenceClassification, """token-classification""": LiltForTokenClassification, """zero-shot""": LiltForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Any ,__lowerCamelCase : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : int ): '''simple docstring''' return True def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = LiltModelTester(self ) a = ConfigTester(self ,config_class=UpperCamelCase__ ,hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple 
docstring''' a = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: a = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ ) @slow def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = LiltModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_torch @slow class lowerCamelCase_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(UpperCamelCase__ ) a = torch.tensor([[1, 2]] ,device=UpperCamelCase__ ) a = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=UpperCamelCase__ ) # forward pass with torch.no_grad(): a = model(input_ids=UpperCamelCase__ ,bbox=UpperCamelCase__ ) a = torch.Size([1, 2, 7_68] ) a = torch.tensor( [[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] ,device=UpperCamelCase__ ,) self.assertTrue(outputs.last_hidden_state.shape ,UpperCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,UpperCamelCase__ ,atol=1e-3 ) )
370
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class lowerCamelCase_(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """simple docstring"""
        return {self.text_column: "text"}
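A small sketch of how the template maps a custom text column; the instance is a frozen dataclass, per the decorator above.

template = lowerCamelCase_(text_column="content")
print(template.column_mapping)  # {"content": "text"}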
330
0
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''', '''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''', '''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''ctc_proj''', '''mask_emb''': '''masked_spec_embed''', } UpperCamelCase__ : Dict = [ '''ctc_proj''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(__lowerCAmelCase, __lowerCAmelCase ) if weight_type is not None: a = getattr(__lowerCAmelCase, __lowerCAmelCase ).shape else: a = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: a = True if "*" in mapped_key: a = name.split(__lowerCAmelCase )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', __lowerCAmelCase ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name and "relative_attention_bias" not in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(__lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase, __lowerCAmelCase ) continue if not is_used: unused_weights.append(__lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__lowerCAmelCase ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None ) -> Optional[int]: """simple docstring""" a = torch.load(__lowerCAmelCase ) a = WavLMConfigOrig(checkpoint['''cfg'''] ) a = WavLMOrig(__lowerCAmelCase ) model.load_state_dict(checkpoint['''model'''] ) model.eval() if config_path is not None: a = WavLMConfig.from_pretrained(__lowerCAmelCase ) else: a = WavLMConfig() a = WavLMModel(__lowerCAmelCase ) recursively_load_weights(__lowerCAmelCase, __lowerCAmelCase ) hf_wavlm.save_pretrained(__lowerCAmelCase ) if __name__ == "__main__": UpperCamelCase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") UpperCamelCase__ : Union[str, Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
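Calling the converter directly rather than through argparse, assuming the entry point carries the name used in __main__ above (convert_wavlm_checkpoint); both paths are placeholders.

# Placeholder paths: a fairseq-style WavLM checkpoint in, an HF model dir out.
convert_wavlm_checkpoint("WavLM-Base.pt", "./wavlm-base-hf")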
371
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'yolos' def __init__( self : Union[str, Any] ,__lowerCamelCase : int=7_68 ,__lowerCamelCase : Dict=12 ,__lowerCamelCase : Union[str, Any]=12 ,__lowerCamelCase : List[Any]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : int=0.0 ,__lowerCamelCase : str=0.0 ,__lowerCamelCase : Optional[Any]=0.02 ,__lowerCamelCase : int=1e-12 ,__lowerCamelCase : Any=[5_12, 8_64] ,__lowerCamelCase : Tuple=16 ,__lowerCamelCase : int=3 ,__lowerCamelCase : Tuple=True ,__lowerCamelCase : Optional[int]=1_00 ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : int=1 ,__lowerCamelCase : List[Any]=5 ,__lowerCamelCase : Optional[int]=2 ,__lowerCamelCase : int=5 ,__lowerCamelCase : str=2 ,__lowerCamelCase : Tuple=0.1 ,**__lowerCamelCase : List[Any] ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = num_detection_tokens a = use_mid_position_embeddings a = auxiliary_loss # Hungarian matcher a = class_cost a = bbox_cost a = giou_cost # Loss coefficients a = bbox_loss_coefficient a = giou_loss_coefficient a = eos_coefficient class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' return 1e-4 @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return 12
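Instantiating the configuration through its public transformers name, YolosConfig; the overridden values come from the defaults above.

from transformers import YolosConfig

config = YolosConfig(num_detection_tokens=100, image_size=[512, 864])
print(config.model_type)  # "yolos"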
330
0
import numpy as np UpperCamelCase__ : int = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class lowerCamelCase_ : def __init__( self : int ): '''simple docstring''' a = np.array(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : str ): '''simple docstring''' a , a = np.where(letter == self.SQUARE ) a = np.concatenate([indexa + 1, indexa + 1] ) return indexes def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int ): '''simple docstring''' a = self.SQUARE[indexa - 1, indexa - 1] return letter def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = message.lower() a = message.replace(''' ''' ,'''''' ) a = message.replace('''j''' ,'''i''' ) a = np.empty((2, len(__lowerCamelCase )) ) for letter_index in range(len(__lowerCamelCase ) ): a = self.letter_to_numbers(message[letter_index] ) a = numbers[0] a = numbers[1] a = first_step.reshape(2 * len(__lowerCamelCase ) ) a = '''''' for numbers_index in range(len(__lowerCamelCase ) ): a = int(second_step[numbers_index * 2] ) a = int(second_step[(numbers_index * 2) + 1] ) a = self.numbers_to_letter(__lowerCamelCase ,__lowerCamelCase ) a = encoded_message + letter return encoded_message def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : str ): '''simple docstring''' a = message.lower() message.replace(''' ''' ,'''''' ) a = np.empty(2 * len(__lowerCamelCase ) ) for letter_index in range(len(__lowerCamelCase ) ): a = self.letter_to_numbers(message[letter_index] ) a = numbers[0] a = numbers[1] a = first_step.reshape((2, len(__lowerCamelCase )) ) a = '''''' for numbers_index in range(len(__lowerCamelCase ) ): a = int(second_step[0, numbers_index] ) a = int(second_step[1, numbers_index] ) a = self.numbers_to_letter(__lowerCamelCase ,__lowerCamelCase ) a = decoded_message + letter return decoded_message
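A round-trip sketch for the cipher class above. This assumes the four methods get distinct names: letter_to_numbers and numbers_to_letter are recoverable from the method bodies, while "encode"/"decode" for the last two methods are assumptions, as is an __init__ that builds self.SQUARE from the module-level square table.

cipher = lowerCamelCase_()
encoded = cipher.encode("testmessage")
print(cipher.decode(encoded))  # expected to round-trip to "testmessage"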
350
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> int: """simple docstring""" a = '''''' for i in table: res += inp[i - 1] return res def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int: """simple docstring""" return data[1:] + data[0] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]: """simple docstring""" a = '''''' for i in range(len(snake_case_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = int('''0b''' + data[0] + data[-1], 2 ) a = int('''0b''' + data[1:3], 2 ) return bin(s[row][col] )[2:] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[int]: """simple docstring""" a = message[:4] a = message[4:] a = apply_table(snake_case_, snake_case_ ) a = xor(snake_case_, snake_case_ ) a = apply_sbox(snake_case_, temp[:4] ) # noqa: E741 a = apply_sbox(snake_case_, temp[4:] ) a = '''0''' * (2 - len(snake_case_ )) + l # noqa: E741 a = '''0''' * (2 - len(snake_case_ )) + r a = apply_table(l + r, snake_case_ ) a = xor(snake_case_, snake_case_ ) return temp + right if __name__ == "__main__": UpperCamelCase__ : int = input("""Enter 10 bit key: """) UpperCamelCase__ : Union[str, Any] = input("""Enter 8 bit message: """) UpperCamelCase__ : Dict = [6, 3, 7, 4, 8, 5, 10, 9] UpperCamelCase__ : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] UpperCamelCase__ : Optional[int] = [2, 4, 3, 1] UpperCamelCase__ : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] UpperCamelCase__ : str = [4, 1, 3, 5, 7, 2, 8, 6] UpperCamelCase__ : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1] UpperCamelCase__ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] UpperCamelCase__ : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation UpperCamelCase__ : Optional[Any] = apply_table(key, paa_table) UpperCamelCase__ : str = temp[:5] UpperCamelCase__ : List[Any] = temp[5:] UpperCamelCase__ : Dict = left_shift(left) UpperCamelCase__ : Any = left_shift(right) UpperCamelCase__ : Optional[Any] = apply_table(left + right, pa_table) UpperCamelCase__ : List[str] = left_shift(left) UpperCamelCase__ : int = left_shift(right) UpperCamelCase__ : List[str] = left_shift(left) UpperCamelCase__ : Dict = left_shift(right) UpperCamelCase__ : List[str] = apply_table(left + right, pa_table) # encryption UpperCamelCase__ : Tuple = apply_table(message, IP) UpperCamelCase__ : Optional[Any] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Optional[int] = temp[4:] + temp[:4] UpperCamelCase__ : Any = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Tuple = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption UpperCamelCase__ : Union[str, Any] = apply_table(CT, IP) UpperCamelCase__ : List[str] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Optional[Any] = temp[4:] + temp[:4] UpperCamelCase__ : Optional[int] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Any = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
330
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ : List[Any] = { """configuration_instructblip""": [ """INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InstructBlipConfig""", """InstructBlipQFormerConfig""", """InstructBlipVisionConfig""", ], """processing_instructblip""": ["""InstructBlipProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = [ """INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """InstructBlipQFormerModel""", """InstructBlipPreTrainedModel""", """InstructBlipForConditionalGeneration""", """InstructBlipVisionModel""", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys UpperCamelCase__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
351
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase_ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) a = '''The dog is cute and lives in the garden house''' a = jnp.array([tokenizer.encode(__lowerCamelCase )] ) a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim a = jnp.array( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) a = model(__lowerCamelCase )['''last_hidden_state'''] self.assertEqual(output.shape ,__lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
330
0
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCamelCase__ : Dict = logging.get_logger(__name__) class lowerCamelCase_ : def __init__( self : Optional[int] ,__lowerCamelCase : str = None ,__lowerCamelCase : uuid.UUID = None ,__lowerCamelCase : Optional[Any]=None ,__lowerCamelCase : List[str]=None ): '''simple docstring''' if not conversation_id: a = uuid.uuida() if past_user_inputs is None: a = [] if generated_responses is None: a = [] a = conversation_id a = past_user_inputs a = generated_responses a = text def __eq__( self : str ,__lowerCamelCase : Any ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ F"""with: \"{text}\".""" ) a = text else: logger.warning( F"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ F"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: a = text def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) a = None def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' self.generated_responses.append(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Tuple ): '''simple docstring''' a = F"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): a = '''user''' if is_user else '''bot''' output += F"""{name} >> {text} \n""" return output @add_end_docstrings( a_ , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase_ ( a_ ): def __init__( self : Any ,*__lowerCamelCase : Union[str, Any] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(*__lowerCamelCase ,**__lowerCamelCase ) if self.tokenizer.pad_token_id is None: a = self.tokenizer.eos_token def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : str=None ,__lowerCamelCase : int=None ,**__lowerCamelCase : Dict ): '''simple docstring''' a = {} a = {} a = {} if min_length_for_response is not None: a = min_length_for_response if minimum_tokens is not None: a = minimum_tokens if "max_length" in generate_kwargs: a = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: a = 
clean_up_tokenization_spaces if generate_kwargs: forward_params.update(__lowerCamelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self : List[Any] ,__lowerCamelCase : Union[Conversation, List[Conversation]] ,__lowerCamelCase : Union[str, Any]=0 ,**__lowerCamelCase : Dict ): '''simple docstring''' a = super().__call__(__lowerCamelCase ,num_workers=__lowerCamelCase ,**__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) and len(__lowerCamelCase ) == 1: return outputs[0] return outputs def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Conversation ,__lowerCamelCase : int=32 ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): raise ValueError('''ConversationalPipeline expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( F"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer ,'''_build_conversation_input_ids''' ): a = self.tokenizer._build_conversation_input_ids(__lowerCamelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version a = self._legacy_parse_and_tokenize(__lowerCamelCase ) if self.framework == "pt": a = torch.LongTensor([input_ids] ) elif self.framework == "tf": a = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Tuple ,__lowerCamelCase : Dict=10 ,**__lowerCamelCase : Dict ): '''simple docstring''' a = generate_kwargs.get('''max_length''' ,self.model.config.max_length ) a = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(F"""Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) a = max_length - minimum_tokens a = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: a = model_inputs['''attention_mask'''][:, -trim:] a = model_inputs.pop('''conversation''' ) a = max_length a = self.model.generate(**__lowerCamelCase ,**__lowerCamelCase ) if self.model.config.is_encoder_decoder: a = 1 else: a = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : int=True ): '''simple docstring''' a = model_outputs['''output_ids'''] a = self.tokenizer.decode( output_ids[0] ,skip_special_tokens=__lowerCamelCase ,clean_up_tokenization_spaces=__lowerCamelCase ,) a = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(__lowerCamelCase ) return conversation def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Conversation ): '''simple docstring''' a = self.tokenizer.eos_token_id a = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) ) if len(__lowerCamelCase ) > self.tokenizer.model_max_length: a = input_ids[-self.tokenizer.model_max_length :] return input_ids
352
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ : Union[str, Any] = 16 UpperCamelCase__ : Dict = 32 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = 1_6 ) -> Tuple: """simple docstring""" a = AutoTokenizer.from_pretrained('''bert-base-cased''' ) a = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(snake_case_ ): # max_length=None => use the model max length (it's actually the default) a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a = datasets.map( snake_case_, batched=snake_case_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(snake_case_ ): # On TPU it's best to pad everything to the same length or training will be very slow. a = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a = 1_6 elif accelerator.mixed_precision != "no": a = 8 else: a = None return tokenizer.pad( snake_case_, padding='''longest''', max_length=snake_case_, pad_to_multiple_of=snake_case_, return_tensors='''pt''', ) # Instantiate dataloaders. 
a = DataLoader( tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) a = DataLoader( tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ : int = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''', snake_case_ ) == "1": a = 2 # Initialize accelerator a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config['''lr'''] a = int(config['''num_epochs'''] ) a = int(config['''seed'''] ) a = int(config['''batch_size'''] ) a = evaluate.load('''glue''', '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case_ ) def inner_training_loop(snake_case_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case_ ) # Instantiate the model (we build the model here so that the seed also controls new weights initialization) a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Instantiate optimizer a = AdamW(params=model.parameters(), lr=snake_case_ ) a , a = get_dataloaders(snake_case_, snake_case_ ) # Instantiate scheduler a = get_linear_schedule_with_warmup( optimizer=snake_case_, num_warmup_steps=1_0_0, num_training_steps=(len(snake_case_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a = model(**snake_case_ ) a = outputs.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device ) with torch.no_grad(): a = model(**snake_case_ ) a = outputs.logits.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case_, references=snake_case_, ) a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""", snake_case_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''', type=snake_case_, default=snake_case_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' ) a = parser.parse_args() a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(snake_case_, snake_case_ ) if __name__ == "__main__": main()
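# Sketch of the decorator's contract (as documented by `accelerate`; assumed here, not shown
# in this file): @find_executable_batch_size(starting_batch_size=N) re-runs the wrapped
# function whenever it raises a CUDA out-of-memory error, halving the batch size on each
# retry (N, N // 2, N // 4, ...) until the call succeeds or the batch size reaches zero.
# This is why the dataloaders are built inside inner_training_loop and why
# accelerator.free_memory() runs first: every retry must start from a clean state.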
330
0
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def SCREAMING_SNAKE_CASE__ ( ) -> Dict: """simple docstring""" a = argparse.ArgumentParser() parser.add_argument( '''-m''', '''--pretrained_model_name_or_path''', type=snake_case_, default=snake_case_, required=snake_case_, help='''Path to pretrained model or model identifier from huggingface.co/models.''', ) parser.add_argument( '''-c''', '''--caption''', type=snake_case_, default='''robotic cat with wings''', help='''Text used to generate images.''', ) parser.add_argument( '''-n''', '''--images_num''', type=snake_case_, default=4, help='''How many images to generate.''', ) parser.add_argument( '''-s''', '''--seed''', type=snake_case_, default=4_2, help='''Seed for random process.''', ) parser.add_argument( '''-ci''', '''--cuda_id''', type=snake_case_, default=0, help='''cuda_id.''', ) a = parser.parse_args() return args def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[int]: """simple docstring""" if not len(snake_case_ ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) a , a = imgs[0].size a = Image.new('''RGB''', size=(cols * w, rows * h) ) a , a = grid.size for i, img in enumerate(snake_case_ ): grid.paste(snake_case_, box=(i % cols * w, i // cols * h) ) return grid def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_="robotic cat with wings", snake_case_=7.5, snake_case_=5_0, snake_case_=1, snake_case_=4_2, ) -> Any: """simple docstring""" a = torch.Generator(pipeline.device ).manual_seed(snake_case_ ) a = pipeline( snake_case_, guidance_scale=snake_case_, num_inference_steps=snake_case_, generator=snake_case_, num_images_per_prompt=snake_case_, ).images a = int(math.sqrt(snake_case_ ) ) a = image_grid(snake_case_, rows=_rows, cols=num_images_per_prompt // _rows ) return grid, images UpperCamelCase__ : Dict = parse_args() # Load models and create wrapper for stable diffusion UpperCamelCase__ : Union[str, Any] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""") UpperCamelCase__ : str = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""") UpperCamelCase__ : int = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""") UpperCamelCase__ : List[str] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""") UpperCamelCase__ : List[str] = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) UpperCamelCase__ : List[str] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")): UpperCamelCase__ : Optional[int] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, """unet""", unet) else: UpperCamelCase__ : int = unet.to(torch.device("""cuda""", args.cuda_id)) UpperCamelCase__ : str = pipeline.to(unet.device) UpperCamelCase__ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split())))) UpperCamelCase__ : Any = 
os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
353
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCamelCase__ : Optional[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(snake_case_, snake_case_ ) if weight_type is not None: a = getattr(snake_case_, snake_case_ ).shape else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue a = True if "*" in mapped_key: a = name.split(snake_case_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', snake_case_ ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]: """simple docstring""" if config_path is not None: a = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: a = UniSpeechSatConfig() a = '''''' if is_finetuned: a = 
UniSpeechSatForCTC(snake_case_ ) else: a = UniSpeechSatForPreTraining(snake_case_ ) a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a = model[0].eval() recursively_load_weights(snake_case_, snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase__ : int = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
0
from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> bool: """simple docstring""" if len(snake_case_ ) < 2: raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' ) if any(i <= 0 for i in nums ): raise ValueError('''All values must be greater than 0''' ) a = nums.copy() copy_nums.sort() return copy_nums[-1] < sum(copy_nums[:-1] ) if __name__ == "__main__": import doctest doctest.testmod()
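# Worked examples (hypothetical values): the check above is the generalized polygon
# inequality -- the longest side must be strictly shorter than the sum of the rest.
# [3, 4, 5] passes (5 < 3 + 4 = 7), while [1, 2, 3] fails (3 < 1 + 2 is False), since a
# 1-2-3 "triangle" degenerates to a line segment.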
354
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]: """simple docstring""" monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() ) @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" class lowerCamelCase_ : def __init__( self : Dict ,__lowerCamelCase : List[str] ): '''simple docstring''' a = metric_id class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() ) @pytest.mark.parametrize( '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple: """simple docstring""" if "tmp_path" in args: a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(snake_case_, match='''https://huggingface.co/docs/evaluate''' ): func(*snake_case_ )
330
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list: """simple docstring""" for i in range(len(snake_case_ ) - 1, 0, -1 ): a = False for j in range(snake_case_, 0, -1 ): if unsorted[j] < unsorted[j - 1]: a , a = unsorted[j - 1], unsorted[j] a = True for j in range(snake_case_ ): if unsorted[j] > unsorted[j + 1]: a , a = unsorted[j + 1], unsorted[j] a = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : Optional[Any] = input("""Enter numbers separated by a comma:\n""").strip() UpperCamelCase__ : str = [int(item) for item in user_input.split(""",""")] print(F"{cocktail_shaker_sort(unsorted) = }")
355
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'luke' def __init__( self : Dict ,__lowerCamelCase : Optional[Any]=5_02_67 ,__lowerCamelCase : str=50_00_00 ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : int=2_56 ,__lowerCamelCase : Optional[int]=12 ,__lowerCamelCase : Tuple=12 ,__lowerCamelCase : Any=30_72 ,__lowerCamelCase : Any="gelu" ,__lowerCamelCase : Any=0.1 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : Tuple=5_12 ,__lowerCamelCase : int=2 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=1 ,__lowerCamelCase : Dict=0 ,__lowerCamelCase : Any=2 ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase ) a = vocab_size a = entity_vocab_size a = hidden_size a = entity_emb_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = use_entity_aware_attention a = classifier_dropout
330
0
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCamelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a_ ) class lowerCamelCase_ ( a_ ): def __init__( self : int ,*__lowerCamelCase : str ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(*__lowerCamelCase ,**__lowerCamelCase ) requires_backends(self ,'''vision''' ) self.check_model_type(__lowerCamelCase ) def __call__( self : int ,__lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**__lowerCamelCase : str ): '''simple docstring''' return super().__call__(__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,**__lowerCamelCase : Dict ): '''simple docstring''' return {}, {}, {} def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = load_image(__lowerCamelCase ) a = image.size a = self.image_processor(images=__lowerCamelCase ,return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = self.model(**__lowerCamelCase ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = model_outputs.predicted_depth a = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=__lowerCamelCase ) a = prediction.squeeze().cpu().numpy() a = (output * 2_55 / np.max(__lowerCamelCase )).astype('''uint8''' ) a = Image.fromarray(__lowerCamelCase ) a = {} a = predicted_depth a = depth return output_dict
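# Usage sketch (assuming this pipeline is registered as the "depth-estimation" task, with a
# default model chosen by `pipeline`):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation")
#   result = depth_estimator("path/or/url/to/image.jpg")
#   result["predicted_depth"]  # raw depth tensor from the model
#   result["depth"]            # PIL image with depth rescaled to 0-255, as built above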
356
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": UpperCamelCase__ : Optional[int] = pd.read_csv("""sample_data.csv""", header=None) UpperCamelCase__ : Tuple = df.shape[:1][0] # If you're using some other dataset input the target column UpperCamelCase__ : List[Any] = df.iloc[:, 1:2] UpperCamelCase__ : Union[str, Any] = actual_data.values.reshape(len_data, 1) UpperCamelCase__ : List[Any] = MinMaxScaler().fit_transform(actual_data) UpperCamelCase__ : Optional[Any] = 10 UpperCamelCase__ : int = 5 UpperCamelCase__ : List[str] = 20 UpperCamelCase__ : Optional[int] = len_data - periods * look_back UpperCamelCase__ : Union[str, Any] = actual_data[:division] UpperCamelCase__ : str = actual_data[division - look_back :] UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = [], [] UpperCamelCase__ , UpperCamelCase__ : str = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) UpperCamelCase__ : List[str] = np.array(train_x) UpperCamelCase__ : Optional[Any] = np.array(test_x) UpperCamelCase__ : Tuple = np.array([list(i.ravel()) for i in train_y]) UpperCamelCase__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y]) UpperCamelCase__ : Union[str, Any] = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") UpperCamelCase__ : Tuple = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) UpperCamelCase__ : Tuple = model.predict(x_test)
330
0
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCamelCase__ : str = logging.getLogger(__name__) require_version("""pytorch_lightning>=1.0.4""") UpperCamelCase__ : Dict = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCamelCase__ : Tuple = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCamelCase__ : List[Any] = sorted(arg_to_scheduler.keys()) UpperCamelCase__ : Union[str, Any] = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class lowerCamelCase_ ( pl.LightningModule ): def __init__( self : int ,__lowerCamelCase : argparse.Namespace ,__lowerCamelCase : List[str]=None ,__lowerCamelCase : List[Any]="base" ,__lowerCamelCase : Union[str, Any]=None ,__lowerCamelCase : Any=None ,__lowerCamelCase : Union[str, Any]=None ,**__lowerCamelCase : List[str] ,): '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(__lowerCamelCase ) a = 0 a = Path(self.hparams.output_dir ) a = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: a = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path ,**({'''num_labels''': num_labels} if num_labels is not None else {}) ,cache_dir=__lowerCamelCase ,**__lowerCamelCase ,) else: a = config a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams ,__lowerCamelCase ,__lowerCamelCase ): assert hasattr(self.config ,__lowerCamelCase ), F"""model config doesn't have a `{p}` attribute""" setattr(self.config ,__lowerCamelCase ,getattr(self.hparams ,__lowerCamelCase ) ) if tokenizer is None: a = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path ,cache_dir=__lowerCamelCase ,) else: a = tokenizer a = MODEL_MODES[mode] if model is None: a = self.model_type.from_pretrained( self.hparams.model_name_or_path ,from_tf=bool('''.ckpt''' in 
self.hparams.model_name_or_path ) ,config=self.config ,cache_dir=__lowerCamelCase ,) else: a = model def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,*__lowerCamelCase : str ,**__lowerCamelCase : Optional[int] ): '''simple docstring''' a = self.model_type.from_pretrained(*__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = arg_to_scheduler[self.hparams.lr_scheduler] a = get_schedule_func( self.opt ,num_warmup_steps=self.hparams.warmup_steps ,num_training_steps=self.total_steps() ) a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = self.model a = ['''bias''', '''LayerNorm.weight'''] a = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check these named parameters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: a = Adafactor( __lowerCamelCase ,lr=self.hparams.learning_rate ,scale_parameter=__lowerCamelCase ,relative_step=__lowerCamelCase ) else: a = AdamW( __lowerCamelCase ,lr=self.hparams.learning_rate ,eps=self.hparams.adam_epsilon ) a = optimizer a = self.get_lr_scheduler() return [optimizer], [scheduler] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : int ): '''simple docstring''' return self.validation_step(__lowerCamelCase ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[str] ): '''simple docstring''' return self.validation_end(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = max(1 ,self.hparams.gpus ) # TODO: consider num_tpu_cores a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' if stage == "test": a = len(self.test_dataloader().dataset ) else: a = self.get_dataloader('''train''' ,self.hparams.train_batch_size ,shuffle=__lowerCamelCase ) a = len(self.train_dataloader().dataset ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ,__lowerCamelCase : int ,__lowerCamelCase : bool = False ): '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' return self.train_loader def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return self.get_dataloader('''dev''' ,self.hparams.eval_batch_size ,shuffle=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' return self.get_dataloader('''test''' ,self.hparams.eval_batch_size ,shuffle=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[str] ): '''simple docstring''' return os.path.join( self.hparams.data_dir ,'''cached_{}_{}_{}'''.format( __lowerCamelCase ,list(filter(__lowerCamelCase ,self.hparams.model_name_or_path.split('''/''' ) ) ).pop() ,str(self.hparams.max_seq_length ) ,) ,) @pl.utilities.rank_zero_only def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Dict[str, Any] ): '''simple docstring''' a = self.output_dir.joinpath('''best_tfmr''' ) a = self.step_count self.model.save_pretrained(__lowerCamelCase 
) self.tokenizer.save_pretrained(__lowerCamelCase ) @staticmethod def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : List[str] ): '''simple docstring''' parser.add_argument( '''--model_name_or_path''' ,default=__lowerCamelCase ,type=__lowerCamelCase ,required=__lowerCamelCase ,help='''Path to pretrained model or model identifier from huggingface.co/models''' ,) parser.add_argument( '''--config_name''' ,default='''''' ,type=__lowerCamelCase ,help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' ,default=__lowerCamelCase ,type=__lowerCamelCase ,help='''Pretrained tokenizer name or path if not the same as model_name''' ,) parser.add_argument( '''--cache_dir''' ,default=str(Path(__lowerCamelCase ).parent / '''test_run''' / '''cache''' ) ,type=__lowerCamelCase ,help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' ,) parser.add_argument( '''--encoder_layerdrop''' ,type=__lowerCamelCase ,help='''Encoder layer dropout probability (Optional). Goes into model.config''' ,) parser.add_argument( '''--decoder_layerdrop''' ,type=__lowerCamelCase ,help='''Decoder layer dropout probability (Optional). Goes into model.config''' ,) parser.add_argument( '''--dropout''' ,type=__lowerCamelCase ,help='''Dropout probability (Optional). Goes into model.config''' ,) parser.add_argument( '''--attention_dropout''' ,type=__lowerCamelCase ,help='''Attention dropout probability (Optional). Goes into model.config''' ,) parser.add_argument('''--learning_rate''' ,default=5e-5 ,type=__lowerCamelCase ,help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' ,default='''linear''' ,choices=__lowerCamelCase ,metavar=__lowerCamelCase ,type=__lowerCamelCase ,help='''Learning rate scheduler''' ,) parser.add_argument('''--weight_decay''' ,default=0.0 ,type=__lowerCamelCase ,help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' ,default=1e-8 ,type=__lowerCamelCase ,help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' ,default=0 ,type=__lowerCamelCase ,help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' ,default=4 ,type=__lowerCamelCase ,help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' ,dest='''max_epochs''' ,default=3 ,type=__lowerCamelCase ) parser.add_argument('''--train_batch_size''' ,default=32 ,type=__lowerCamelCase ) parser.add_argument('''--eval_batch_size''' ,default=32 ,type=__lowerCamelCase ) parser.add_argument('''--adafactor''' ,action='''store_true''' ) class lowerCamelCase_ ( pl.Callback ): def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class lowerCamelCase_ ( pl.Callback ): def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Optional[int] ): '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(__lowerCamelCase ) class lowerCamelCase_ ( pl.Callback ): def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = trainer.lr_schedulers[0]['''scheduler'''] a = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : pl.Trainer ,__lowerCamelCase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) a = trainer.callback_metrics # Log results for key in sorted(__lowerCamelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__lowerCamelCase ,str(metrics[key] ) ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : pl.Trainer ,__lowerCamelCase : pl.LightningModule ): '''simple docstring''' rank_zero_info('''***** Test results *****''' ) a = trainer.callback_metrics # Log and save results to file a = os.path.join(pl_module.hparams.output_dir ,'''test_results.txt''' ) with open(__lowerCamelCase ,'''w''' ) as writer: for key in sorted(__lowerCamelCase ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(__lowerCamelCase ,str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(__lowerCamelCase ,str(metrics[key] ) ) ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> None: """simple docstring""" parser.add_argument( '''--output_dir''', default=str(Path(snake_case_ ).parent / '''test_run''' / '''model_checkpoints''' ), type=snake_case_, help='''The output directory where the model predictions and checkpoints will be written.''', ) parser.add_argument( '''--fp16''', action='''store_true''', help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''', ) parser.add_argument( '''--fp16_opt_level''', type=snake_case_, default='''O2''', help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ), ) parser.add_argument('''--n_tpu_cores''', dest='''tpu_cores''', type=snake_case_ ) parser.add_argument('''--max_grad_norm''', dest='''gradient_clip_val''', default=1.0, type=snake_case_, help='''Max gradient norm''' ) parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' ) parser.add_argument('''--do_predict''', action='''store_true''', help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''', dest='''accumulate_grad_batches''', type=snake_case_, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', ) parser.add_argument('''--seed''', type=snake_case_, default=4_2, help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''', default=str(Path(snake_case_ ).parent / '''test_run''' / '''dummy-train-data''' ), type=snake_case_, help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''', ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=True, snake_case_=[], snake_case_=None, snake_case_=None, **snake_case_, ) -> str: """simple docstring""" pl.seed_everything(args.seed ) # init model a = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=snake_case_ ) # add custom checkpoints if checkpoint_callback is None: a = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix='''checkpoint''', monitor='''val_loss''', mode='''min''', save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(snake_case_ ) if logging_callback is None: a = LoggingCallback() a = {} if args.fpaa: a = 1_6 if args.gpus > 1: a = '''auto''' a = '''ddp''' a = args.accumulate_grad_batches a = None a = '''auto''' a = pl.Trainer.from_argparse_args( snake_case_, weights_summary=snake_case_, callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback], logger=snake_case_, val_check_interval=1, num_sanity_val_steps=2, **snake_case_, ) if args.do_train: trainer.fit(snake_case_ ) else: print('''RAG modeling tests with new set functions successfully executed!''' ) return trainer
357
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple: """simple docstring""" a = FileLock(str(tmpdir / '''foo.lock''' ) ) a = FileLock(str(tmpdir / '''foo.lock''' ) ) a = 0.01 with locka.acquire(): with pytest.raises(snake_case_ ): a = time.time() locka.acquire(snake_case_ ) assert time.time() - _start > timeout def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" a = '''a''' * 1_0_0_0 + '''.lock''' a = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(snake_case_ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 a = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(snake_case_ ): locka.acquire(0 )
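# The two tests above pin down two FileLock behaviors: a second lock on the same path raises
# Timeout once its timeout elapses while the first holder keeps the lock, and overlong lock
# filenames are truncated so the final basename stays within the common 255-character limit.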
330
0
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Dict = { """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""", # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCamelCase_ ( a_ ): """simple docstring""" SCREAMING_SNAKE_CASE_ = 'vit_mae' def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = decoder_num_attention_heads a = decoder_hidden_size a = decoder_num_hidden_layers a = decoder_intermediate_size a = mask_ratio a = norm_pix_loss
358
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Dict = { """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""", # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'vit_mae' def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = decoder_num_attention_heads a = decoder_hidden_size a = decoder_num_hidden_layers a = decoder_intermediate_size a = mask_ratio a = norm_pix_loss
330
0
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Sets a given tensor (parameter or buffer) of a module on a specific device, quantizing it with
    bitsandbytes when the parameter is an `Int8Params`/`Params4bit` instance.
    """
    # Recurse through dotted tensor names to reach the owning submodule
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put it on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private recursive helper that performs the module replacement. Returns the converted model and a boolean
    indicating whether at least one module was replaced.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check that the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires_grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Converts a model's `nn.Linear` (and `Conv1D`) layers into quantized `bnb.nn` layers.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use "
        "`set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """
    Utility returning the keys of the modules to keep in full precision (tied weights and the output head).
    """
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # Otherwise the model has an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # Add the last module together with the tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # Remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
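# --- Hedged usage sketch (added; not part of the original module) -------------
# Assuming `bitsandbytes` is installed and a CUDA device is available,
# `replace_with_bnb_linear` swaps every `nn.Linear` (except the modules in
# `modules_to_not_convert`, which defaults to ["lm_head"]) for a quantized bnb
# layer. `BitsAndBytesConfig` is the public transformers config object; the toy
# two-layer model below is hypothetical.
#
#     import torch.nn as nn
#     from transformers import BitsAndBytesConfig
#
#     toy = nn.Sequential(nn.Linear(16, 16), nn.Linear(16, 32))
#     toy = replace_with_bnb_linear(toy, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
#     # Both children are now `bnb.nn.Linear8bitLt`; their int8 weights are
#     # materialized when `set_module_quantized_tensor_to_device` moves each
#     # parameter onto the GPU.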
def stooge_sort(arr: list) -> list:
    """Sorts `arr` in place with stooge sort and returns it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the slice
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3, then the last 2/3, then the first 2/3 again
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
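# Added note: stooge sort recurses three times on overlapping 2/3 slices, so its
# runtime satisfies T(n) = 3*T(2n/3) + O(1), i.e. O(n^(log 3 / log 1.5)) ~ O(n^2.71),
# slower even than bubble sort; it is kept for illustration only. Quick check:
#
#     >>> stooge_sort([2, 4, 5, 3, 1])
#     [1, 2, 3, 4, 5]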
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise the PyTorch model from the json config
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from the TensorFlow checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
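# Hedged CLI sketch (added): a typical invocation of this script; the script name
# and file paths below are hypothetical placeholders for a downloaded TF 1.x
# BERT checkpoint.
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./uncased_L-12_H-768_A-12/bert_model.ckpt \
#         --bert_config_file ./uncased_L-12_H-768_A-12/bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin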
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCamelCase__ : Union[str, Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCamelCase__ : str = { """jukebox""": 512, } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = version a = max_n_lyric_tokens a = n_genres with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: a = oov.replace(r'''\-\'''' ,r'''\-+\'''' ) a = regex.compile(__lowerCamelCase ) a = {v: k for k, v in self.artists_encoder.items()} a = {v: k for k, v in self.genres_encoder.items()} a = {v: k for k, v in self.lyrics_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists] for genres in range(len(__lowerCamelCase ) ): a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]] a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str] ): '''simple docstring''' return list(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' a , a , a = self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = self._tokenize(__lowerCamelCase ) return artist, genre, lyrics def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": a = artists[idx].lower() a = [genres[idx].lower()] else: a = self._normalize(artists[idx] ) + '''.v2''' a = [ self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )} a = 0 a = len(__lowerCamelCase ) + 1 a = self.vocab a = {v: k for k, v in self.vocab.items()} a = '''''' else: a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) a = self._run_strip_accents(__lowerCamelCase ) a = lyrics.replace('''\\''' ,'''\n''' ) a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], [] return artists, genres, lyrics def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ): '''simple docstring''' a = unicodedata.normalize('''NFD''' ,__lowerCamelCase ) a = [] for char in text: a = unicodedata.category(__lowerCamelCase ) if cat == "Mn": continue output.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = ( [chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )] + ['''.'''] ) a = frozenset(__lowerCamelCase ) a = re.compile(r'''_+''' ) a = ''''''.join([c if c in accepted else '''_''' for c 
in text.lower()] ) a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' ) return text def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ): '''simple docstring''' return " ".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): a = TensorType(__lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf a = tf.constant a = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch a = torch.tensor a = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 a = jnp.array a = _is_jax else: a = np.asarray a = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: a = [inputs] if not is_tensor(__lowerCamelCase ): a = as_tensor(__lowerCamelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ): '''simple docstring''' a = [0, 0, 0] a = [artist] * len(self.version ) a = [genres] * len(self.version ) a , a , a = self.tokenize(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = [-INFINITY] * len(full_tokens[-1] ) a = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def SCREAMING_SNAKE_CASE_ ( self : 
Any ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : str ): '''simple docstring''' a = self.artists_decoder.get(__lowerCamelCase ) a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index] a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
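# Hedged usage sketch (added): the tokenizer encodes an (artist, genre, lyrics)
# triple once per prior listed in `version`; the checkpoint name below is assumed
# to exist on the Hub.
#
#     from transformers import JukeboxTokenizer
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     batch = tokenizer("Alan Jackson", "Country Rock", "old town road")
#     # batch["input_ids"] is a list with one tensor per prior (v3, v2, v2).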
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
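# Added note: `stretch` implements plain histogram equalization -- for each gray
# level r_k it accumulates the empirical CDF s_k = sum_{i<=k} n_i / N and maps
# r_k to round((L - 1) * s_k) with L = 256, spreading the intensities over the
# full 0..255 range before the image is rewritten pixel by pixel.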
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
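# Hedged follow-up (added): once uploaded, the tiny checkpoint is consumed like
# any full-size FSMT model; the model id below assumes the upload step succeeded.
#
#     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#     tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#     model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")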
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}


def parse_roman_numerals(numerals: str) -> int:
    """Converts a string of (possibly non-minimal) Roman numerals to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Converts an integer to its minimal Roman-numeral representation."""
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 1_00
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 1_00

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
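# Worked example (added): parsing uses the subtractive rule (a symbol smaller
# than its successor is subtracted), so "XVI" -> 10 + 5 + 1 = 16, and a
# non-minimal form round-trips to the minimal one:
#
#     >>> parse_roman_numerals("XIIIIII")   # 10 + 6, written inefficiently
#     16
#     >>> generate_roman_numerals(16)
#     'XVI'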
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) UpperCamelCase__ : Optional[Any] = """bert-base-cased""" UpperCamelCase__ : int = """fp16""" UpperCamelCase__ : str = """bf16""" UpperCamelCase__ : List[Any] = [FPaa, BFaa] @require_fsdp @require_cuda class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' super().setUp() a = dict( ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = F"""{i + 1}""" a = strategy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = prefetch_policy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = state_dict_type with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = AutoModel.from_pretrained(__lowerCamelCase ) for policy in FSDP_AUTO_WRAP_POLICY: a = self.dist_env.copy() a = policy if policy == "TRANSFORMER_BASED_WRAP": a = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": a = '''2000''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) a = self.dist_env.copy() a = '''TRANSFORMER_BASED_WRAP''' a = '''T5Layer''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCamelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertTrue('''Could not find the transformer layer class to wrap in 
the model.''' in str(cm.exception ) ) a = self.dist_env.copy() a = '''SIZE_BASED_WRAP''' a = '''0''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: a = self.dist_env.copy() a = mp_dtype with mockenv_context(**__lowerCamelCase ): a = Accelerator() if mp_dtype == "fp16": a = torch.floataa elif mp_dtype == "bf16": a = torch.bfloataa a = MixedPrecision(param_dtype=__lowerCamelCase ,reduce_dtype=__lowerCamelCase ,buffer_dtype=__lowerCamelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCamelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,__lowerCamelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: a = self.dist_env.copy() a = str(__lowerCamelCase ).lower() with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCamelCase ) ) @require_fsdp @require_multi_gpu @slow class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' super().setUp() a = 0.82 a = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] a = { '''multi_gpu_fp16''': 32_00, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00, '''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } a = 1_60 a = 1_60 a = inspect.getfile(accelerate.test_utils ) a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_performance.py''' ) a = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: a = cmd.copy() for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(__lowerCamelCase ): a = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue a = len(__lowerCamelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: a = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) a = cmd_config[:-1] a = os.path.join(self.tmpdir ,'''epoch_0''' ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): a = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in spec: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: 
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
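# Hedged CLI sketch (added): the tests above assemble commands equivalent to the
# following; the script name and output directory are placeholders.
#
#     accelerate launch --num_processes=2 --num_machines=1 --machine_rank=0 \
#         --use_fsdp --mixed_precision=fp16 \
#         --fsdp_sharding_strategy=1 \
#         --fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP \
#         --fsdp_transformer_layer_cls_to_wrap=BertLayer \
#         test_performance.py --output_dir=/tmp/fsdp --performance_lower_bound=0.82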
"""simple docstring""" import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" a = [ '''decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(snake_case_, snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]: """simple docstring""" a , a = emb.weight.shape a = nn.Linear(snake_case_, snake_case_, bias=snake_case_ ) a = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" a = torch.load(snake_case_, map_location='''cpu''' ) a = Namespace(**checkpoint['''cfg''']['''model'''] ) a = checkpoint['''model'''] remove_ignore_keys_(snake_case_ ) a = state_dict['''decoder.embed_tokens.weight'''].shape[0] a = {key.replace('''decoder''', '''model''' ): val for key, val in state_dict.items()} a = XGLMConfig( vocab_size=snake_case_, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''gelu''', scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, ) a = XGLMForCausalLM(snake_case_ ) a = model.load_state_dict(snake_case_, strict=snake_case_ ) print(snake_case_ ) a = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": UpperCamelCase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCamelCase__ : List[Any] = parser.parse_args() UpperCamelCase__ : int = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices = vertices
        self.edges = {(min(edge), max(edge)): weight for edge, weight in edges.items()}

    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        # Grow a minimum spanning tree from the smallest vertex, always adding
        # the cheapest edge that crosses the current cut.
        subgraph = Graph({min(self.vertices)}, {})
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
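# Minimal sketch (added) of the Graph API on a weighted triangle; Prim's
# algorithm keeps the two cheapest edges, so the saving over the full graph is 3:
#
#     >>> g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     >>> mst = g.prims_algorithm()
#     >>> sum(g.edges.values()) - sum(mst.edges.values())
#     3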
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Tuple = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""} UpperCamelCase__ : str = { """vocab_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""", }, """emoji_file""": { """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""", }, } UpperCamelCase__ : int = { """abeja/gpt-neox-japanese-2.7b""": 2_048, } def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" with open(snake_case_, '''r''', encoding='''utf-8''' ) as f: a = json.loads(f.read() ) a = collections.OrderedDict() a = collections.OrderedDict() a = collections.OrderedDict() with open(snake_case_, '''r''', encoding='''utf-8''' ) as f: a = f.readlines() a = [[t.rstrip('''\n''' )] if (t == ''',''' or ''',''' not in t) else t.rstrip('''\n''' ).split(''',''' ) for t in token] for idx, b in enumerate(snake_case_ ): a = b a = idx for wd in b: a = idx return vocab, raw_vocab, ids_to_tokens, emoji class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[str] ,__lowerCamelCase : Any="<|endoftext|>" ,__lowerCamelCase : List[str]="<|endoftext|>" ,__lowerCamelCase : Optional[int]="<|startoftext|>" ,__lowerCamelCase : Tuple="<|endoftext|>" ,__lowerCamelCase : List[str]=False ,**__lowerCamelCase : List[Any] ,): '''simple docstring''' super().__init__( unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,do_clean_text=__lowerCamelCase ,**__lowerCamelCase ,) if not os.path.isfile(__lowerCamelCase ): raise ValueError( F"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ''' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' ) if not os.path.isfile(__lowerCamelCase ): raise ValueError( F"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" ''' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`''' ) a = do_clean_text a , a , a , a = load_vocab_and_emoji(__lowerCamelCase ,__lowerCamelCase ) a = SubWordJapaneseTokenizer( vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji ) @property def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' return len(self.raw_vocab ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' return dict(self.raw_vocab ,**self.added_tokens_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : List[str] ): '''simple docstring''' return self.subword_tokenizer.tokenize(__lowerCamelCase ,clean=self.do_clean_text ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' return self.vocab.get(__lowerCamelCase ,self.vocab.get(self.unk_token ) ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' return self.subword_tokenizer.convert_id_to_token(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Dict ): '''simple docstring''' a = ''''''.join(__lowerCamelCase ).strip() return out_string def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : "Conversation" ): '''simple docstring''' a = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) + [self.eos_token_id] ) if len(__lowerCamelCase ) > self.model_max_length: a = input_ids[-self.model_max_length :] return input_ids def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' a = 0 if os.path.isdir(__lowerCamelCase ): a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''emoji_file'''] ) else: a = ( (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''vocab_file'''] ) a = ( (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory + VOCAB_FILES_NAMES['''emoji_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) a = token_index writer.write(''','''.join(__lowerCamelCase ) + '''\n''' ) index += 1 with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as writer: json.dump(self.emoji ,__lowerCamelCase ) return vocab_file, emoji_file class lowerCamelCase_ ( a_ ): def __init__( self : Tuple ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : str ): '''simple docstring''' a = vocab # same as swe a = ids_to_tokens # same as bpe a = emoji a = np.max([len(__lowerCamelCase ) for w in self.vocab.keys()] ) a = re.compile(r'''(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)''' ) a = re.compile(r'''[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*''' ) a = re.compile(r'''[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}''' ) a = re.compile( 
r'''([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' ) a = re.compile( r'''(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*''' ) a = re.compile( r'''((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*''' ) a = '''─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿''' a = '''▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟''' a = str.maketrans({k: '''<BLOCK>''' for k in keisen + blocks} ) def __len__( self : Optional[int] ): '''simple docstring''' return len(self.ids_to_tokens ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = self.content_repattera.sub('''<URL>''' ,__lowerCamelCase ) a = self.content_repattera.sub('''<EMAIL>''' ,__lowerCamelCase ) a = self.content_repattera.sub('''<TEL>''' ,__lowerCamelCase ) a = self.content_repattera.sub('''<DATE>''' ,__lowerCamelCase ) a = self.content_repattera.sub('''<DATE>''' ,__lowerCamelCase ) a = self.content_repattera.sub('''<PRICE>''' ,__lowerCamelCase ) a = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: a = content.replace('''<BLOCK><BLOCK>''' ,'''<BLOCK>''' ) return content def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : int=False ): '''simple docstring''' a = text.replace(''' ''' ,'''<SP>''' ) a = text.replace(''' ''' ,'''<SP>''' ) a = text.replace('''\r\n''' ,'''<BR>''' ) a = text.replace('''\n''' ,'''<BR>''' ) a = text.replace('''\r''' ,'''<BR>''' ) a = text.replace('''\t''' ,'''<TAB>''' ) a = text.replace('''—''' ,'''ー''' ) a = text.replace('''−''' ,'''ー''' ) for k, v in self.emoji["emoji"].items(): if k in text: a = text.replace(__lowerCamelCase ,__lowerCamelCase ) if clean: a = self.clean_text(__lowerCamelCase ) def check_simbol(__lowerCamelCase : Dict ): a = x.encode() if len(__lowerCamelCase ) == 1 and len(__lowerCamelCase ) == 2: a = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc_2a1 and c <= 0Xc_2bf) or (c >= 0Xc_780 and c <= 0Xc_783) or (c >= 0Xc_ab9 and c <= 0Xc_bbf) or (c >= 0Xc_c80 and c <= 0Xc_da2) ): return True return False def checkuae(__lowerCamelCase : List[str] ): a = x.encode() if len(__lowerCamelCase ) == 1 and len(__lowerCamelCase ) == 3: a = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe28_080 and c <= 0Xe2b_07f: return True return False a = 0 a = [] while pos < len(__lowerCamelCase ): a = min(len(__lowerCamelCase ) ,pos + self.maxlen + 1 ) if text[pos] == '''<''' else pos + 3 a = [] # (token_id, token, pos) for e in range(__lowerCamelCase ,__lowerCamelCase ,-1 ): a = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(__lowerCamelCase ) > 2: a = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(__lowerCamelCase ) > 0: # the smallest token_id is adopted a , a , a = sorted(__lowerCamelCase ,key=lambda __lowerCamelCase : x[0] )[0] result.append(__lowerCamelCase ) a = e else: a = pos + 1 a = text[pos:end] if check_simbol(__lowerCamelCase ): result.append('''<KIGOU>''' ) elif checkuae(__lowerCamelCase ): result.append('''<U2000U2BFF>''' ) else: for i in wd.encode('''utf-8''' ): 
result.append('''<|byte%d|>''' % i ) a = end return result def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Tuple="\n" ): '''simple docstring''' a = [] a = [] a = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(__lowerCamelCase ) > 0: words.append(bytearray(__lowerCamelCase ).decode('''utf-8''' ,errors='''replace''' ) ) a = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['''emoji_inv'''][word] ) elif word == "<SP>": words.append(''' ''' ) elif word == "<BR>": words.append(__lowerCamelCase ) elif word == "<TAB>": words.append('''\t''' ) elif word == "<BLOCK>": words.append('''▀''' ) elif word == "<KIGOU>": words.append('''ǀ''' ) elif word == "<U2000U2BFF>": words.append('''‖''' ) else: words.append(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: words.append(bytearray(__lowerCamelCase ).decode('''utf-8''' ,errors='''replace''' ) ) a = ''''''.join(__lowerCamelCase ) return text
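# Hedged usage sketch (added): the tokenizer normalizes whitespace, URLs, dates
# and emoji markup before subword lookup; the checkpoint name below mirrors
# PRETRAINED_VOCAB_FILES_MAP above.
#
#     from transformers import GPTNeoXJapaneseTokenizer
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界。")["input_ids"]
#     text = tokenizer.decode(ids)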
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring UpperCamelCase__ : List[Any] = """RegNetConfig""" # Base docstring UpperCamelCase__ : Dict = """facebook/regnet-y-040""" UpperCamelCase__ : int = [1, 1_088, 7, 7] # Image classification docstring UpperCamelCase__ : Optional[Any] = """facebook/regnet-y-040""" UpperCamelCase__ : Dict = """tabby, tabby cat""" UpperCamelCase__ : Dict = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) a = ACTaFN[activation] if activation is not None else tf.identity def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ): '''simple docstring''' a = self.convolution(self.padding(__lowerCamelCase ) ) a = self.normalization(__lowerCamelCase ) a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config.num_channels a = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = shape_list(__lowerCamelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) ) a = self.embedder(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ): '''simple docstring''' return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) a = [ tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ), ] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = self.pooler(__lowerCamelCase ) for layer_module in self.attention: a = layer_module(__lowerCamelCase ) a = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer a = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ), *[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ): '''simple docstring''' for layer_module in self.layers: a = layer_module(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) ) a = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in 
enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ): '''simple docstring''' a = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a = hidden_states + (hidden_state,) a = stage_module(__lowerCamelCase ) if output_hidden_states: a = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): SCREAMING_SNAKE_CASE_ = RegNetConfig def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' ) a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) @unpack_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase ) a = self.encoder( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = encoder_outputs[0] a = self.pooler(__lowerCamelCase ) # Change to NCHW output format have uniformity in the modules a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = RegNetConfig SCREAMING_SNAKE_CASE_ = 'regnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} UpperCamelCase__ : Union[str, Any] = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase__ : List[str] = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , a_ , ) class lowerCamelCase_ ( a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , a_ , ) class lowerCamelCase_ ( a_ , a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = config.num_labels a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) # classification head a = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier[0](__lowerCamelCase ) a = self.classifier[1](__lowerCamelCase ) a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _A : Tuple = logging.get_logger(__name__) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] ,__lowerCamelCase : Optional[Any]="</s>" ,__lowerCamelCase : Union[str, Any]="<unk>" ,__lowerCamelCase : Dict="<pad>" ,__lowerCamelCase : Optional[int]=1_25 ,__lowerCamelCase : Tuple=None ,**__lowerCamelCase : Optional[int] ,): '''simple docstring''' if extra_ids > 0 and additional_special_tokens is None: a = [F"""<extra_id_{i}>""" for i in range(__lowerCamelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens a = len(set(filter(lambda __lowerCamelCase : bool('''extra_id''' in str(__lowerCamelCase ) ) ,__lowerCamelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" ''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the''' ''' extra_ids tokens''' ) a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else pad_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else eos_token a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( eos_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,extra_ids=__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = extra_ids a = 2**8 # utf is 8 bits # define special tokens dict a = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } a = len(self.special_tokens_encoder ) a = len(__lowerCamelCase ) for i, token in enumerate(__lowerCamelCase ): a = self.vocab_size + i - n a = {v: k for k, v in self.special_tokens_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCamelCase ,token_ids_a=__lowerCamelCase ,already_has_special_tokens=__lowerCamelCase ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(__lowerCamelCase )) + [1] return ([0] * len(__lowerCamelCase )) + [1] + ([0] * len(__lowerCamelCase )) + [1] def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[int] ): '''simple docstring''' if len(__lowerCamelCase ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"""This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated""" ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' a = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' a = self._add_eos_if_not_present(__lowerCamelCase ) if token_ids_a is None: return token_ids_a else: a = self._add_eos_if_not_present(__lowerCamelCase ) return token_ids_a + token_ids_a def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = [chr(i ) for i in __lowerCamelCase.encode('''utf-8''' )] return tokens def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Tuple ): '''simple docstring''' if token in self.special_tokens_encoder: a = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: a = self.added_tokens_encoder[token] elif len(__lowerCamelCase ) != 1: a = self.unk_token_id else: a = ord(__lowerCamelCase ) + self._num_special_tokens return token_id def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[Any] ): '''simple docstring''' if index in self.special_tokens_decoder: a = self.special_tokens_decoder[index] else: a = chr(index - self._num_special_tokens ) return token def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Any ): '''simple docstring''' a = B'''''' for token in tokens: if token in self.special_tokens_decoder: a = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.added_tokens_decoder: a = self.added_tokens_decoder[token].encode('''utf-8''' ) elif token in self.special_tokens_encoder: a = token.encode('''utf-8''' ) elif token in self.added_tokens_encoder: a = token.encode('''utf-8''' ) else: a = bytes([ord(__lowerCamelCase )] ) bstring += tok_string a = bstring.decode('''utf-8''' ,errors='''ignore''' ) return string def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' return ()
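# A minimal round-trip sketch, assuming the upstream ByT5Tokenizer API that the
# byte-level tokenizer above mirrors (the checkpoint name is illustrative):
def _byte_tokenizer_roundtrip_demo():
    from transformers import ByT5Tokenizer

    tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small")
    # One id per UTF-8 byte, offset by the 3 special tokens, followed by </s> (id 1).
    ids = tokenizer("hello").input_ids
    assert tokenizer.decode(ids, skip_special_tokens=True) == "hello"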
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'efficientformer' def __init__( self : Optional[int] ,__lowerCamelCase : List[int] = [3, 2, 6, 4] ,__lowerCamelCase : List[int] = [48, 96, 2_24, 4_48] ,__lowerCamelCase : List[bool] = [True, True, True, True] ,__lowerCamelCase : int = 4_48 ,__lowerCamelCase : int = 32 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : int = 7 ,__lowerCamelCase : int = 5 ,__lowerCamelCase : int = 8 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 16 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : bool = True ,__lowerCamelCase : bool = True ,__lowerCamelCase : float = 1e-5 ,__lowerCamelCase : str = "gelu" ,__lowerCamelCase : float = 0.02 ,__lowerCamelCase : float = 1e-12 ,__lowerCamelCase : int = 2_24 ,__lowerCamelCase : float = 1e-05 ,**__lowerCamelCase : Dict ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_act a = hidden_dropout_prob a = hidden_sizes a = num_hidden_layers a = num_attention_heads a = initializer_range a = layer_norm_eps a = patch_size a = num_channels a = depths a = mlp_expansion_ratio a = downsamples a = dim a = key_dim a = attention_ratio a = resolution a = pool_size a = downsample_patch_size a = downsample_stride a = downsample_pad a = drop_path_rate a = num_metaad_blocks a = distillation a = use_layer_scale a = layer_scale_init_value a = image_size a = batch_norm_eps
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCamelCase__ : int = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : int = ["""EncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Union[str, Any] = ["""TFEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Tuple = ["""FlaxEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys UpperCamelCase__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase__ : Any = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCamelCase__ : Optional[Any] = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCamelCase__ : Optional[Any] = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : List[str] = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : Optional[int] = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for tf_name, hf_name in patterns: a = k.replace(snake_case_, snake_case_ ) return k def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" a = BigBirdPegasusConfig(**snake_case_ ) a = BigBirdPegasusForConditionalGeneration(snake_case_ ) a = torch_model.state_dict() a = {} # separating decoder weights a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = DECODER_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = REMAINING_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a = mapping['''model.embed_positions.weight'''] a = mapping.pop('''model.embed_positions.weight''' ) a , a = torch_model.load_state_dict(snake_case_, strict=snake_case_ ) a = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict: """simple docstring""" a = tf.train.list_variables(snake_case_ ) a = {} a = ['''global_step'''] for name, shape in tqdm(snake_case_, desc='''converting tf checkpoint to dict''' ): a = any(pat in name for pat in ignore_name ) if skip_key: continue a = tf.train.load_variable(snake_case_, snake_case_ ) a = array return tf_weights def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int: """simple docstring""" a = get_tf_weights_as_numpy(snake_case_ ) a = convert_bigbird_pegasus(snake_case_, snake_case_ ) torch_model.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCamelCase__ : int = parser.parse_args() UpperCamelCase__ : Tuple = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
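# Typical invocation sketch (the script filename and paths are illustrative; the
# two flags come from the argparse block above):
#
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird_pegasus_ckpt \
#       --save_dir ./bigbird-pegasus-converted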
import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
import re def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" if len(re.findall('''[ATCG]''', snake_case_ ) ) != len(snake_case_ ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
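# Worked example for the complement helper above (A<->T, C<->G via the
# translation table), using the obfuscated function name from this file:
#
#   SCREAMING_SNAKE_CASE__("ATCG")   # -> "TAGC"
#   SCREAMING_SNAKE_CASE__("ATCB")   # raises ValueError("Invalid Strand")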
"""simple docstring""" import os import pytest from datasets import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, ) UpperCamelCase__ : str = pytest.mark.integration @pytest.mark.parametrize('''path''', ['''paws''', '''csv'''] ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Tuple: """simple docstring""" inspect_dataset(snake_case_, snake_case_ ) a = path + '''.py''' assert script_name in os.listdir(snake_case_ ) assert "__pycache__" not in os.listdir(snake_case_ ) @pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' ) @pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' ) @pytest.mark.parametrize('''path''', ['''accuracy'''] ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str: """simple docstring""" inspect_metric(snake_case_, snake_case_ ) a = path + '''.py''' assert script_name in os.listdir(snake_case_ ) assert "__pycache__" not in os.listdir(snake_case_ ) @pytest.mark.parametrize( '''path, config_name, expected_splits''', [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ], ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> str: """simple docstring""" a = get_dataset_config_info(snake_case_, config_name=snake_case_ ) assert info.config_name == config_name assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''', [ ('''paws''', None, ValueError), ], ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int: """simple docstring""" with pytest.raises(snake_case_ ): get_dataset_config_info(snake_case_, config_name=snake_case_ ) @pytest.mark.parametrize( '''path, expected''', [ ('''squad''', '''plain_text'''), ('''acronym_identification''', '''default'''), ('''lhoestq/squad''', '''plain_text'''), ('''lhoestq/test''', '''default'''), ('''lhoestq/demo1''', '''lhoestq--demo1'''), ('''dalle-mini/wit''', '''dalle-mini--wit'''), ], ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Any: """simple docstring""" a = get_dataset_config_names(snake_case_ ) assert expected in config_names @pytest.mark.parametrize( '''path, expected_configs, expected_splits_in_first_config''', [ ('''squad''', ['''plain_text'''], ['''train''', '''validation''']), ('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']), ('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']), ], ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Any: """simple docstring""" a = get_dataset_infos(snake_case_ ) assert list(infos.keys() ) == expected_configs a = expected_configs[0] assert expected_config in infos a = infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits_in_first_config @pytest.mark.parametrize( '''path, expected_config, expected_splits''', [ ('''squad''', '''plain_text''', ['''train''', '''validation''']), ('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']), ('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']), ], ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Tuple: """simple docstring""" a = get_dataset_infos(snake_case_ ) assert expected_config in infos a 
= infos[expected_config] assert info.config_name == expected_config assert list(info.splits.keys() ) == expected_splits @pytest.mark.parametrize( '''path, config_name, expected_exception''', [ ('''paws''', None, ValueError), ], ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> List[str]: """simple docstring""" with pytest.raises(snake_case_ ): get_dataset_split_names(snake_case_, config_name=snake_case_ )
from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count += 1 a = '''_''' if count > 1: return False else: return "".join(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]: """simple docstring""" a = [] while True: a = ['''$'''] * len(snake_case_ ) a = [] for i in range(len(snake_case_ ) ): for j in range(i + 1, len(snake_case_ ) ): a = compare_string(binary[i], binary[j] ) if k is False: a = '''*''' a = '''*''' temp.append('''X''' ) for i in range(len(snake_case_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case_ ) == 0: return pi a = list(set(snake_case_ ) ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] for minterm in minterms: a = '''''' for _ in range(snake_case_ ): a = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case_ ) return temp def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] a = [0] * len(snake_case_ ) for i in range(len(chart[0] ) ): a = 0 a = -1 for j in range(len(snake_case_ ) ): if chart[j][i] == 1: count += 1 a = j if count == 1: a = 1 for i in range(len(snake_case_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case_ ) ): a = 0 temp.append(prime_implicants[i] ) while True: a = 0 a = -1 a = 0 for i in range(len(snake_case_ ) ): a = chart[i].count(1 ) if count_n > max_n: a = count_n a = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case_ ) ): a = 0 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]: """simple docstring""" a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )] for i in range(len(snake_case_ ) ): a = prime_implicants[i].count('''_''' ) for j in range(len(snake_case_ ) ): if is_for_table(prime_implicants[i], binary[j], snake_case_ ): a = 1 return chart def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" a = int(input('''Enter the no. of variables\n''' ) ) a = [ float(snake_case_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] a = decimal_to_binary(snake_case_, snake_case_ ) a = check(snake_case_ ) print('''Prime Implicants are:''' ) print(snake_case_ ) a = prime_implicant_chart(snake_case_, snake_case_ ) a = selection(snake_case_, snake_case_ ) print('''Essential Prime Implicants are:''' ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
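# Non-interactive sketch of the same Quine-McCluskey pipeline, with the call
# order taken directly from main() above (3 variables and minterms 1, 4, 6 are
# illustrative values; main() parses minterms as floats, mirrored here):
#
#   binary = decimal_to_binary(3, [1.0, 4.0, 6.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)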
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ : List[str] = logging.get_logger(__name__) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = ['pixel_values'] def __init__( self : List[str] ,__lowerCamelCase : bool = True ,__lowerCamelCase : Optional[Dict[str, int]] = None ,__lowerCamelCase : PILImageResampling = PILImageResampling.BILINEAR ,__lowerCamelCase : bool = True ,__lowerCamelCase : Dict[str, int] = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : Union[int, float] = 1 / 2_55 ,__lowerCamelCase : bool = True ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,**__lowerCamelCase : int ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = size if size is not None else {'''shortest_edge''': 2_56} a = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ) a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} a = get_size_dict(__lowerCamelCase ) a = do_resize a = size a = resample a = do_center_crop a = crop_size a = do_rescale a = rescale_factor a = do_normalize a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN a = image_std if image_std is not None else IMAGENET_STANDARD_STD def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Dict[str, int] ,__lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : Optional[int] ,): '''simple docstring''' a = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}""" ) a = get_resize_output_image_size(__lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=__lowerCamelCase ) return resize(__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Dict[str, int] ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : Dict ,): '''simple docstring''' a = get_size_dict(__lowerCamelCase ) return center_crop(__lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : float ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : Tuple ): '''simple docstring''' return rescale(__lowerCamelCase ,scale=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Union[float, List[float]] ,__lowerCamelCase : Union[float, List[float]] ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : List[str] ,): '''simple docstring''' return normalize(__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : ImageInput ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Dict[str, int] = None ,__lowerCamelCase : PILImageResampling = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict[str, int] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[float] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**__lowerCamelCase : Dict ,): '''simple docstring''' a = do_resize if do_resize is not None else self.do_resize a = size if size is not None else self.size a = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ) a = resample if resample is not None else self.resample a = do_center_crop if do_center_crop is not None else self.do_center_crop a = crop_size if crop_size is not None else self.crop_size a = get_size_dict(__lowerCamelCase ) a = do_rescale if do_rescale is not None else self.do_rescale a = rescale_factor if rescale_factor is not None else self.rescale_factor a = do_normalize if do_normalize is not None else self.do_normalize a = image_mean if image_mean is not None else self.image_mean a = image_std if image_std is not None else self.image_std a = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. a = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: a = [self.resize(image=__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ) for image in images] if do_center_crop: a = [self.center_crop(image=__lowerCamelCase ,size=__lowerCamelCase ) for image in images] if do_rescale: a = [self.rescale(image=__lowerCamelCase ,scale=__lowerCamelCase ) for image in images] if do_normalize: a = [self.normalize(image=__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ) for image in images] a = [to_channel_dimension_format(__lowerCamelCase ,__lowerCamelCase ) for image in images] a = {'''pixel_values''': images} return BatchFeature(data=__lowerCamelCase ,tensor_type=__lowerCamelCase )
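# Minimal preprocessing sketch for the image processor above (the class is the
# one named `lowerCamelCase_` in this file; this assumes the standard
# `BaseImageProcessor.__call__` -> preprocess dispatch, and the input array is
# illustrative):
def _image_preprocess_demo():
    import numpy as np

    processor = lowerCamelCase_()  # defaults: resize shortest edge to 256, center-crop to 224x224
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    batch = processor(images=image, return_tensors="np")
    # Channels-first output per ChannelDimension.FIRST in preprocess above.
    assert batch["pixel_values"].shape == (1, 3, 224, 224)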
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCamelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a_ ) class lowerCamelCase_ ( a_ ): def __init__( self : int ,*__lowerCamelCase : str ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(*__lowerCamelCase ,**__lowerCamelCase ) requires_backends(self ,'''vision''' ) self.check_model_type(__lowerCamelCase ) def __call__( self : int ,__lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**__lowerCamelCase : str ): '''simple docstring''' return super().__call__(__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,**__lowerCamelCase : Dict ): '''simple docstring''' return {}, {}, {} def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = load_image(__lowerCamelCase ) a = image.size a = self.image_processor(images=__lowerCamelCase ,return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = self.model(**__lowerCamelCase ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = model_outputs.predicted_depth a = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=__lowerCamelCase ) a = prediction.squeeze().cpu().numpy() a = (output * 2_55 / np.max(__lowerCamelCase )).astype('''uint8''' ) a = Image.fromarray(__lowerCamelCase ) a = {} a = predicted_depth a = depth return output_dict
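# A short usage sketch via the high-level pipeline API that this class backs
# (the model id is illustrative):
def _depth_estimation_demo():
    from transformers import pipeline

    depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"]            # PIL.Image, interpolated and rescaled to 0-255 as in postprocess above
    result["predicted_depth"]  # raw depth tensor returned by the model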
import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy UpperCamelCase__ = logging.getLogger(__name__) UpperCamelCase__ = """pytorch_model.bin""" @dataclasses.dataclass class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , ) @dataclasses.dataclass class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'A csv or a json file containing the validation data.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'The name of the task to train on.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default='no' , metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]' } , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=0.0 , metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' 
} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=1_00 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) SCREAMING_SNAKE_CASE_ = dataclasses.field( default=a_ , metadata={'help': 'Random seed for initialization.'} , ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = datasets.concatenate_datasets([infer_input, infer_output], axis=1 ) if args.do_filter_by_confidence: a = dataset.filter(lambda snake_case_ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 a = int(eval_result * len(snake_case_ ) ) print(snake_case_ ) a = dataset.sort('''probability''', reverse=snake_case_ ) a = dataset.select(range(snake_case_ ) ) a = dataset.remove_columns(['''label''', '''probability'''] ) a = dataset.rename_column('''prediction''', '''label''' ) a = dataset.map(lambda snake_case_ : {"label": idalabel[example["label"]]} ) a = dataset.shuffle(seed=args.seed ) a = os.path.join(snake_case_, f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(snake_case_, index=snake_case_ ) else: dataset.to_json(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, **snake_case_ ) -> Optional[Any]: """simple docstring""" a = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() a = STModelArguments(model_name_or_path=snake_case_ ) a = STDataArguments(train_file=snake_case_, infer_file=snake_case_ ) a = STTrainingArguments(output_dir=snake_case_ ) a = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(snake_case_ ).items(): setattr(snake_case_, snake_case_, snake_case_ ) for key, value in kwargs.items(): if hasattr(snake_case_, snake_case_ ): setattr(snake_case_, snake_case_, snake_case_ ) # Sanity checks a = {} a = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None a = args.train_file a = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None a = args.eval_file for key in data_files: a = data_files[key].split('''.''' )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: a = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info('''Creating the initial data directory for self-training...''' ) a = f"""{args.output_dir}/self-train_iter-{{}}""".format a = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=snake_case_ ) os.makedirs(snake_case_, exist_ok=snake_case_ ) accelerator.wait_for_everyone() a = None a = None a = 0 a = False # Show the progress bar a = tqdm(range(args.max_selftrain_iterations ), disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0, int(args.max_selftrain_iterations ) ): a = data_dir_format(snake_case_ ) assert os.path.exists(snake_case_ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 a = os.path.join(snake_case_, '''stage-1''' ) a = { '''accelerator''': accelerator, '''model_name_or_path''': args.model_name_or_path, '''cache_dir''': args.cache_dir, '''do_train''': True, '''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''], '''do_eval''': True if args.eval_file is not None else False, '''eval_file''': data_files['''eval'''], '''do_predict''': True, '''infer_file''': data_files['''infer'''], '''task_name''': args.task_name, '''label_list''': args.label_list, '''output_dir''': current_output_dir, '''eval_metric''': args.eval_metric, '''evaluation_strategy''': args.evaluation_strategy, '''early_stopping_patience''': args.early_stopping_patience, '''early_stopping_threshold''': args.early_stopping_threshold, '''seed''': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(snake_case_, snake_case_ ): arguments_dict.update({key: value} ) a = os.path.join(snake_case_, '''best-checkpoint''', snake_case_ ) if os.path.exists(snake_case_ ): logger.info( '''Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 1.''', snake_case_, snake_case_, ) else: logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''', snake_case_ ) finetune(**snake_case_ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case_ ) logger.info('''Self-training job completed: iteration: %d, stage: 1.''', snake_case_ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data a = os.path.join(snake_case_, '''best-checkpoint''' ) a = os.path.join(snake_case_, '''stage-2''' ) # Update arguments_dict a = model_path a = data_files['''train'''] a = current_output_dir a = os.path.join(snake_case_, '''best-checkpoint''', snake_case_ ) if os.path.exists(snake_case_ ): logger.info( '''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''', snake_case_, snake_case_, ) else: logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''', snake_case_ ) finetune(**snake_case_ ) accelerator.wait_for_everyone() assert os.path.exists(snake_case_ ) logger.info('''Self-training job completed: iteration: %d, stage: 2.''', snake_case_ ) a = iteration a = data_dir_format(iteration + 1 ) a = AutoConfig.from_pretrained(os.path.join(snake_case_, '''best-checkpoint''' ) ) a = config.idalabel a = os.path.join(snake_case_, '''eval_results_best-checkpoint.json''' ) a = os.path.join(snake_case_, '''test_results_best-checkpoint.json''' ) assert os.path.exists(snake_case_ ) with open(snake_case_, '''r''' ) as f: a = float(json.load(snake_case_ )[args.eval_metric] ) a = os.path.join(snake_case_, '''infer_output_best-checkpoint.csv''' ) assert os.path.exists(snake_case_ ) # Loading the dataset from local csv or json files. a = load_dataset(args.data_file_extension, data_files={'''data''': data_files['''infer''']} )['''data'''] a = load_dataset('''csv''', data_files={'''data''': infer_output_file} )['''data'''] if accelerator.is_main_process: os.makedirs(snake_case_, exist_ok=snake_case_ ) shutil.copy(snake_case_, os.path.join(snake_case_, f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(snake_case_ ): shutil.copy(snake_case_, os.path.join(snake_case_, f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) accelerator.wait_for_everyone() a = os.path.join(snake_case_, f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: a = eval_result if best_iteration is None: a = new_iteration a = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: a = new_iteration a = new_eval_result a = 0 else: if new_eval_result == best_eval_result: a = new_iteration a = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: a = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info('''Best iteration: %d''', snake_case_ ) logger.info('''Best evaluation result: %s = %f''', args.eval_metric, snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case_, f"""eval_results_iter-{iteration}.json""" ), os.path.join(snake_case_, '''eval_results_best-iteration.json''' ), ) else: # Assume that the last iteration is the best logger.info('''Best iteration: %d''', args.max_selftrain_iterations - 1 ) logger.info('''Best 
evaluation result: %s = %f''', args.eval_metric, snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(snake_case_, f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ), os.path.join(snake_case_, '''eval_results_best-iteration.json''' ), )
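# Call sketch for the self-training driver above (positional order inferred from
# the STModelArguments / STDataArguments / STTrainingArguments construction; the
# function is the one named SCREAMING_SNAKE_CASE__ here, called `selftrain`
# upstream, and all values are illustrative):
#
#   selftrain("bert-base-uncased", "train.csv", "infer.csv", "output/",
#             eval_file="eval.csv", evaluation_strategy="epoch",
#             max_selftrain_iterations=10)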
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a_ ) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} ) SCREAMING_SNAKE_CASE_ = Features({'text': Value('string' )} ) SCREAMING_SNAKE_CASE_ = Features({} ) SCREAMING_SNAKE_CASE_ = "text" @property def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' return {self.text_column: "text"}
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[Any]=13 ,__lowerCamelCase : Tuple=7 ,__lowerCamelCase : List[str]=True ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : int=True ,__lowerCamelCase : List[str]=True ,__lowerCamelCase : List[str]=99 ,__lowerCamelCase : Optional[Any]=32 ,__lowerCamelCase : Optional[int]=5 ,__lowerCamelCase : List[Any]=4 ,__lowerCamelCase : int=37 ,__lowerCamelCase : Union[str, Any]="gelu" ,__lowerCamelCase : Optional[int]=0.1 ,__lowerCamelCase : Optional[Any]=0.1 ,__lowerCamelCase : Tuple=5_12 ,__lowerCamelCase : int=16 ,__lowerCamelCase : Dict=2 ,__lowerCamelCase : List[str]=0.02 ,__lowerCamelCase : Union[str, Any]=4 ,): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_attention_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_choices def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) a = None if self.use_attention_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None if self.use_token_type_ids: a = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) a = BertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowerCamelCase ,initializer_range=self.initializer_range ,) return config, input_ids, token_type_ids, attention_mask def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a , a = config_and_inputs a = True a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) a = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class lowerCamelCase_ ( a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, 
FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, ) if is_flax_available() else () ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = FlaxBertModelTester(self ) @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = FlaxBertModel.from_pretrained('''bert-base-cased''' ) a = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'yolos' def __init__( self : Union[str, Any] ,__lowerCamelCase : int=7_68 ,__lowerCamelCase : Dict=12 ,__lowerCamelCase : Union[str, Any]=12 ,__lowerCamelCase : List[Any]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : int=0.0 ,__lowerCamelCase : str=0.0 ,__lowerCamelCase : Optional[Any]=0.02 ,__lowerCamelCase : int=1e-12 ,__lowerCamelCase : Any=[5_12, 8_64] ,__lowerCamelCase : Tuple=16 ,__lowerCamelCase : int=3 ,__lowerCamelCase : Tuple=True ,__lowerCamelCase : Optional[int]=1_00 ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : int=1 ,__lowerCamelCase : List[Any]=5 ,__lowerCamelCase : Optional[int]=2 ,__lowerCamelCase : int=5 ,__lowerCamelCase : str=2 ,__lowerCamelCase : Tuple=0.1 ,**__lowerCamelCase : List[Any] ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = num_detection_tokens a = use_mid_position_embeddings a = auxiliary_loss # Hungarian matcher a = class_cost a = bbox_cost a = giou_cost # Loss coefficients a = bbox_loss_coefficient a = giou_loss_coefficient a = eos_coefficient class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' return 1e-4 @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return 12
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCamelCase__ : Optional[int] = logging.getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]: """simple docstring""" a = np.argmax(snake_case_, axis=1 ) return np.sum(outputs == labels ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int: """simple docstring""" with open(snake_case_, encoding='''utf_8''' ) as f: a = csv.reader(snake_case_ ) a = [] next(snake_case_ ) # skip the first line for line in tqdm(snake_case_ ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] for dataset in encoded_datasets: a = len(snake_case_ ) a = np.zeros((n_batch, 2, input_len), dtype=np.intaa ) a = np.zeros((n_batch, 2), dtype=np.intaa ) a = np.full((n_batch, 2, input_len), fill_value=-1_0_0, dtype=np.intaa ) a = np.zeros((n_batch,), dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(snake_case_ ): a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] a = with_conta a = with_conta a = len(snake_case_ ) - 1 a = len(snake_case_ ) - 1 a = with_conta a = with_conta a = mc_label a = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(snake_case_ ) for t in all_inputs ) ) return tensor_datasets def SCREAMING_SNAKE_CASE__ ( ) -> str: """simple docstring""" a = argparse.ArgumentParser() parser.add_argument('''--model_name''', type=snake_case_, default='''openai-gpt''', help='''pretrained model name''' ) parser.add_argument('''--do_train''', action='''store_true''', help='''Whether to run training.''' ) parser.add_argument('''--do_eval''', action='''store_true''', help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''', default=snake_case_, type=snake_case_, required=snake_case_, help='''The output directory where the model predictions and checkpoints will be written.''', ) parser.add_argument('''--train_dataset''', type=snake_case_, default='''''' ) parser.add_argument('''--eval_dataset''', type=snake_case_, default='''''' ) parser.add_argument('''--seed''', type=snake_case_, default=4_2 ) parser.add_argument('''--num_train_epochs''', type=snake_case_, default=3 ) parser.add_argument('''--train_batch_size''', type=snake_case_, default=8 ) parser.add_argument('''--eval_batch_size''', type=snake_case_, default=1_6 ) parser.add_argument('''--adam_epsilon''', default=1e-8, type=snake_case_, help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''', type=snake_case_, default=1 ) parser.add_argument( '''--max_steps''', default=-1, type=snake_case_, help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ), ) parser.add_argument( '''--gradient_accumulation_steps''', type=snake_case_, default=1, help='''Number of updates steps to accumulate before performing a backward/update pass.''', ) parser.add_argument('''--learning_rate''', type=snake_case_, default=6.25e-5 ) parser.add_argument('''--warmup_steps''', default=0, type=snake_case_, help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''', type=snake_case_, default='''warmup_linear''' ) parser.add_argument('''--weight_decay''', type=snake_case_, default=0.01 ) parser.add_argument('''--lm_coef''', type=snake_case_, default=0.9 ) parser.add_argument('''--n_valid''', type=snake_case_, default=3_7_4 ) parser.add_argument('''--server_ip''', type=snake_case_, default='''''', help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''', type=snake_case_, default='''''', help='''Can be used for distant debugging.''' ) a = parser.parse_args() print(snake_case_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=snake_case_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) a = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(snake_case_, snake_case_ ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset a = ['''_start_''', '''_delimiter_''', '''_classify_'''] a = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(snake_case_ ) a = tokenizer.convert_tokens_to_ids(snake_case_ ) a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(snake_case_ ) ) model.to(snake_case_ ) # Load and encode the datasets def tokenize_and_encode(snake_case_ ): if isinstance(snake_case_, snake_case_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(snake_case_ ) ) elif isinstance(snake_case_, snake_case_ ): return obj return [tokenize_and_encode(snake_case_ ) for o in obj] logger.info('''Encoding dataset...''' ) a = load_rocstories_dataset(args.train_dataset ) a = load_rocstories_dataset(args.eval_dataset ) a = (train_dataset, eval_dataset) a = tokenize_and_encode(snake_case_ ) # Compute the max input length for the Transformer a = model.config.n_positions // 2 - 2 a = max( len(story[:max_length] ) + max(len(conta[:max_length] ), len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) a = min(snake_case_, model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders a = pre_process_datasets(snake_case_, snake_case_, snake_case_, *snake_case_ ) a , a = tensor_datasets[0], tensor_datasets[1] a = TensorDataset(*snake_case_ ) a = RandomSampler(snake_case_ ) a = DataLoader(snake_case_, sampler=snake_case_, batch_size=args.train_batch_size ) a = TensorDataset(*snake_case_ ) a = 
SequentialSampler(snake_case_ ) a = DataLoader(snake_case_, sampler=snake_case_, batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: a = args.max_steps a = args.max_steps // (len(snake_case_ ) // args.gradient_accumulation_steps) + 1 else: a = len(snake_case_ ) // args.gradient_accumulation_steps * args.num_train_epochs a = list(model.named_parameters() ) a = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] a = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] a = AdamW(snake_case_, lr=args.learning_rate, eps=args.adam_epsilon ) a = get_linear_schedule_with_warmup( snake_case_, num_warmup_steps=args.warmup_steps, num_training_steps=snake_case_ ) if args.do_train: a , a , a = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ), desc='''Epoch''' ): a = 0 a = 0 a = tqdm(snake_case_, desc='''Training''' ) for step, batch in enumerate(snake_case_ ): a = tuple(t.to(snake_case_ ) for t in batch ) a , a , a , a = batch a = model(snake_case_, mc_token_ids=snake_case_, lm_labels=snake_case_, mc_labels=snake_case_ ) a = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() a = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 a = '''Training loss: {:.2e} lr: {:.2e}'''.format(snake_case_, scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer a = model.module if hasattr(snake_case_, '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` a = os.path.join(args.output_dir, snake_case_ ) a = os.path.join(args.output_dir, snake_case_ ) torch.save(model_to_save.state_dict(), snake_case_ ) model_to_save.config.to_json_file(snake_case_ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) a = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(snake_case_ ) if args.do_eval: model.eval() a , a = 0, 0 a , a = 0, 0 for batch in tqdm(snake_case_, desc='''Evaluating''' ): a = tuple(t.to(snake_case_ ) for t in batch ) a , a , a , a = batch with torch.no_grad(): a , a , a , a = model( snake_case_, mc_token_ids=snake_case_, lm_labels=snake_case_, mc_labels=snake_case_ ) a = mc_logits.detach().cpu().numpy() a = mc_labels.to('''cpu''' ).numpy() a = accuracy(snake_case_, snake_case_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 a = eval_loss / nb_eval_steps a = eval_accuracy / nb_eval_examples a = tr_loss / nb_tr_steps if args.do_train else None a = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} a = os.path.join(args.output_dir, '''eval_results.txt''' ) with open(snake_case_, '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''', snake_case_, str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
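# A minimal sketch of the tensor layout the preprocessing above builds for
# OpenAIGPTDoubleHeadsModel. All token ids below are invented for illustration; only the
# shapes, the classification-token indices, and the -100 LM-label masking mirror the script.
import numpy as np

start, delim, clf = 90, 91, 92                      # stand-ins for _start_/_delimiter_/_classify_ ids
story, cont_a, cont_b = [1, 2, 3], [4, 5], [6, 7]   # one story with two candidate endings
seq_a = [start] + story + [delim] + cont_a + [clf]
seq_b = [start] + story + [delim] + cont_b + [clf]
input_len = max(len(seq_a), len(seq_b))
input_ids = np.zeros((1, 2, input_len), dtype=np.int64)
lm_labels = np.full((1, 2, input_len), fill_value=-100, dtype=np.int64)  # -100 is ignored by the LM loss
mc_token_ids = np.zeros((1, 2), dtype=np.int64)
input_ids[0, 0, : len(seq_a)], input_ids[0, 1, : len(seq_b)] = seq_a, seq_b
lm_labels[0, 0, : len(seq_a)], lm_labels[0, 1, : len(seq_b)] = seq_a, seq_b
mc_token_ids[0] = [len(seq_a) - 1, len(seq_b) - 1]  # index of the classification token per choice
mc_labels = np.array([0], dtype=np.int64)           # ending A is the correct one in this toy row
print(input_ids.shape, mc_token_ids.tolist(), mc_labels.tolist())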
350
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> int: """simple docstring""" a = '''''' for i in table: res += inp[i - 1] return res def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int: """simple docstring""" return data[1:] + data[0] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]: """simple docstring""" a = '''''' for i in range(len(snake_case_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = int('''0b''' + data[0] + data[-1], 2 ) a = int('''0b''' + data[1:3], 2 ) return bin(s[row][col] )[2:] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[int]: """simple docstring""" a = message[:4] a = message[4:] a = apply_table(snake_case_, snake_case_ ) a = xor(snake_case_, snake_case_ ) a = apply_sbox(snake_case_, temp[:4] ) # noqa: E741 a = apply_sbox(snake_case_, temp[4:] ) a = '''0''' * (2 - len(snake_case_ )) + l # noqa: E741 a = '''0''' * (2 - len(snake_case_ )) + r a = apply_table(l + r, snake_case_ ) a = xor(snake_case_, snake_case_ ) return temp + right if __name__ == "__main__": UpperCamelCase__ : int = input("""Enter 10 bit key: """) UpperCamelCase__ : Union[str, Any] = input("""Enter 8 bit message: """) UpperCamelCase__ : Dict = [6, 3, 7, 4, 8, 5, 10, 9] UpperCamelCase__ : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] UpperCamelCase__ : Optional[int] = [2, 4, 3, 1] UpperCamelCase__ : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] UpperCamelCase__ : str = [4, 1, 3, 5, 7, 2, 8, 6] UpperCamelCase__ : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1] UpperCamelCase__ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] UpperCamelCase__ : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation UpperCamelCase__ : Optional[Any] = apply_table(key, paa_table) UpperCamelCase__ : str = temp[:5] UpperCamelCase__ : List[Any] = temp[5:] UpperCamelCase__ : Dict = left_shift(left) UpperCamelCase__ : Any = left_shift(right) UpperCamelCase__ : Optional[Any] = apply_table(left + right, pa_table) UpperCamelCase__ : List[str] = left_shift(left) UpperCamelCase__ : int = left_shift(right) UpperCamelCase__ : List[str] = left_shift(left) UpperCamelCase__ : Dict = left_shift(right) UpperCamelCase__ : List[str] = apply_table(left + right, pa_table) # encryption UpperCamelCase__ : Tuple = apply_table(message, IP) UpperCamelCase__ : Optional[Any] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Optional[int] = temp[4:] + temp[:4] UpperCamelCase__ : Any = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Tuple = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption UpperCamelCase__ : Union[str, Any] = apply_table(CT, IP) UpperCamelCase__ : List[str] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Optional[Any] = temp[4:] + temp[:4] UpperCamelCase__ : Optional[int] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Any = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
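# A quick, non-interactive sanity check of the S-DES helpers above, assuming they keep
# their original names (apply_table, left_shift, xor) when the file is imported.
assert apply_table("1010", [2, 4, 3, 1]) == "0011"  # permute bits by 1-based positions
assert left_shift("10100") == "01001"               # circular left shift by one bit
assert xor("1010", "0110") == "1100"                # per-bit xor of equal-length strings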
330
0
import sys import webbrowser import requests from bs4 import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("""Googling.....""") UpperCamelCase__ : List[Any] = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:]) UpperCamelCase__ : List[str] = requests.get(url, headers={"""User-Agent""": UserAgent().random}) # res.raise_for_status() with open("""project1a.html""", """wb""") as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) UpperCamelCase__ : Optional[int] = BeautifulSoup(res.text, """html.parser""") UpperCamelCase__ : List[Any] = list(soup.select(""".eZt8xd"""))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("""href""")) else: webbrowser.open(F"https://google.com{link.get('href')}")
351
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase_ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) a = '''The dog is cute and lives in the garden house''' a = jnp.array([tokenizer.encode(__lowerCamelCase )] ) a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim a = jnp.array( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) a = model(__lowerCamelCase )['''last_hidden_state'''] self.assertEqual(output.shape ,__lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
330
0
from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = ['input_values', 'padding_mask'] def __init__( self : List[Any] ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 2_40_00 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : float = None ,__lowerCamelCase : float = None ,**__lowerCamelCase : Tuple ,): '''simple docstring''' super().__init__(feature_size=__lowerCamelCase ,sampling_rate=__lowerCamelCase ,padding_value=__lowerCamelCase ,**__lowerCamelCase ) a = chunk_length_s a = overlap @property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self : Optional[int] ,__lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,__lowerCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None ,__lowerCamelCase : Optional[bool] = False ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : Optional[int] = None ,): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of""" F""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with""" F""" {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''' ) elif padding is None: # by default let's pad the inputs a = True a = bool( isinstance(__lowerCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: a = [np.asarray(__lowerCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__lowerCamelCase ,np.ndarray ): a = np.asarray(__lowerCamelCase ,dtype=np.floataa ) elif isinstance(__lowerCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): a = raw_audio.astype(np.floataa ) # always return batch if not is_batched: a = [np.asarray(__lowerCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__lowerCamelCase ): if example.ndim > 2: raise ValueError(F"""Expected input shape (channels, length) but got shape {example.shape}""" ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(F"""Expected mono audio but example has {example.shape[-1]} channels""" ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F"""Expected stereo audio but example has {example.shape[-1]} channels""" ) a = None a = BatchFeature({'''input_values''': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: a = min(array.shape[0] for array in raw_audio ) a = int(np.floor(max_length / self.chunk_stride ) ) a = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: a = max(array.shape[0] for array in raw_audio ) a = int(np.ceil(max_length / self.chunk_stride ) ) a = (nb_step - 1) * self.chunk_stride + self.chunk_length a = '''max_length''' else: a = input_values # normal padding on batch if padded_inputs is None: a = self.pad( __lowerCamelCase ,max_length=__lowerCamelCase ,truncation=__lowerCamelCase ,padding=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,) if padding: a = padded_inputs.pop('''attention_mask''' ) a = [] for example in padded_inputs.pop('''input_values''' ): if self.feature_size == 1: a = example[..., None] input_values.append(example.T ) a = input_values if return_tensors is not None: a = padded_inputs.convert_to_tensors(__lowerCamelCase ) return padded_inputs
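# A minimal usage sketch for an EnCodec-style feature extractor like the class above.
# The checkpoint name is an assumption (a typical public EnCodec checkpoint); only the
# call pattern and the padded output shape are the point here.
import numpy as np
from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_24khz")
audio = np.random.randn(24_000).astype(np.float32)           # one second of fake mono audio
inputs = extractor(raw_audio=audio, sampling_rate=24_000, return_tensors="np")
print(inputs["input_values"].shape)                          # (batch, channels, padded_length)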
352
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ : Union[str, Any] = 16 UpperCamelCase__ : Dict = 32 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = 1_6 ) -> Tuple: """simple docstring""" a = AutoTokenizer.from_pretrained('''bert-base-cased''' ) a = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(snake_case_ ): # max_length=None => use the model max length (it's actually the default) a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a = datasets.map( snake_case_, batched=snake_case_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(snake_case_ ): # On TPU it's best to pad everything to the same length or training will be very slow. a = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a = 1_6 elif accelerator.mixed_precision != "no": a = 8 else: a = None return tokenizer.pad( snake_case_, padding='''longest''', max_length=snake_case_, pad_to_multiple_of=snake_case_, return_tensors='''pt''', ) # Instantiate dataloaders. 
a = DataLoader( tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) a = DataLoader( tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ : int = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''', snake_case_ ) == "1": a = 2 # Initialize accelerator a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config['''lr'''] a = int(config['''num_epochs'''] ) a = int(config['''seed'''] ) a = int(config['''batch_size'''] ) a = evaluate.load('''glue''', '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case_ ) def inner_training_loop(snake_case_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Instantiate optimizer a = AdamW(params=model.parameters(), lr=snake_case_ ) a , a = get_dataloaders(snake_case_, snake_case_ ) # Instantiate scheduler a = get_linear_schedule_with_warmup( optimizer=snake_case_, num_warmup_steps=1_0_0, num_training_steps=(len(snake_case_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a = model(**snake_case_ ) a = outputs.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): a = model(**snake_case_ ) a = outputs.logits.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case_, references=snake_case_, ) a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""", snake_case_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''', type=snake_case_, default=snake_case_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' ) a = parser.parse_args() a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(snake_case_, snake_case_ ) if __name__ == "__main__": main()
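# The core mechanism of the script above in isolation: find_executable_batch_size re-runs
# the wrapped function with a halved batch size whenever it sees an out-of-memory error.
# The fake OOM below just makes the retry loop observable without a GPU.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=64)
def toy_loop(batch_size):
    if batch_size > 16:                            # pretend anything above 16 does not fit
        raise RuntimeError("CUDA out of memory.")  # the message the decorator matches on
    return batch_size

print(toy_loop())  # retries 64 -> 32 -> 16 and prints 16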
330
0
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]: """simple docstring""" monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() ) @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" class lowerCamelCase_ : def __init__( self : Dict ,__lowerCamelCase : List[str] ): '''simple docstring''' a = metric_id class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() ) @pytest.mark.parametrize( '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple: """simple docstring""" if "tmp_path" in args: a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(snake_case_, match='''https://huggingface.co/docs/evaluate''' ): func(*snake_case_ )
353
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCamelCase__ : Optional[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(snake_case_, snake_case_ ) if weight_type is not None: a = getattr(snake_case_, snake_case_ ).shape else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue a = True if "*" in mapped_key: a = name.split(snake_case_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', snake_case_ ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]: """simple docstring""" if config_path is not None: a = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: a = UniSpeechSatConfig() a = '''''' if is_finetuned: a = 
UniSpeechSatForCTC(snake_case_ ) else: a = UniSpeechSatForPreTraining(snake_case_ ) a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a = model[0].eval() recursively_load_weights(snake_case_, snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase__ : int = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
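# A hedged example of invoking the conversion script above from a shell; the script
# filename and every path below are placeholders, not verified values.
#
#   python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path ./unispeech-sat-converted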
330
0
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCamelCase__ : Optional[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(snake_case_, snake_case_ ) if weight_type is not None: a = getattr(snake_case_, snake_case_ ).shape else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue a = True if "*" in mapped_key: a = name.split(snake_case_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', snake_case_ ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]: """simple docstring""" if config_path is not None: a = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: a = UniSpeechSatConfig() a = '''''' if is_finetuned: a = 
UniSpeechSatForCTC(snake_case_ ) else: a = UniSpeechSatForPreTraining(snake_case_ ) a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a = model[0].eval() recursively_load_weights(snake_case_, snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase__ : int = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
354
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]: """simple docstring""" monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() ) @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" class lowerCamelCase_ : def __init__( self : Dict ,__lowerCamelCase : List[str] ): '''simple docstring''' a = metric_id class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']] def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' return self._metrics monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() ) @pytest.mark.parametrize( '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))] ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple: """simple docstring""" if "tmp_path" in args: a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args ) with pytest.warns(snake_case_, match='''https://huggingface.co/docs/evaluate''' ): func(*snake_case_ )
330
0
"""simple docstring""" import os from collections.abc import Iterator def SCREAMING_SNAKE_CASE__ ( snake_case_ = "." ) -> Iterator[str]: """simple docstring""" for dir_path, dir_names, filenames in os.walk(snake_case_ ): a = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._'''] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(snake_case_ )[1] in (".py", ".ipynb"): yield os.path.join(snake_case_, snake_case_ ).lstrip('''./''' ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[Any]: """simple docstring""" return f"""{i * " "}*""" if i else "\n##" def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str: """simple docstring""" a = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(snake_case_ ) or old_parts[i] != new_part) and new_part: print(f"""{md_prefix(snake_case_ )} {new_part.replace("_", " " ).title()}""" ) return new_path def SCREAMING_SNAKE_CASE__ ( snake_case_ = "." ) -> None: """simple docstring""" a = '''''' for filepath in sorted(good_file_paths(snake_case_ ) ): a , a = os.path.split(snake_case_ ) if filepath != old_path: a = print_path(snake_case_, snake_case_ ) a = (filepath.count(os.sep ) + 1) if filepath else 0 a = f"""{filepath}/{filename}""".replace(''' ''', '''%20''' ) a = os.path.splitext(filename.replace('''_''', ''' ''' ).title() )[0] print(f"""{md_prefix(snake_case_ )} [{filename}]({url})""" ) if __name__ == "__main__": print_directory_md(""".""")
355
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : str = logging.get_logger(__name__) UpperCamelCase__ : Optional[int] = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'luke' def __init__( self : Dict ,__lowerCamelCase : Optional[Any]=5_02_67 ,__lowerCamelCase : str=50_00_00 ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : int=2_56 ,__lowerCamelCase : Optional[int]=12 ,__lowerCamelCase : Tuple=12 ,__lowerCamelCase : Any=30_72 ,__lowerCamelCase : Any="gelu" ,__lowerCamelCase : Any=0.1 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : Tuple=5_12 ,__lowerCamelCase : int=2 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=1 ,__lowerCamelCase : Dict=0 ,__lowerCamelCase : Any=2 ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase ) a = vocab_size a = entity_vocab_size a = hidden_size a = entity_emb_size a = num_hidden_layers a = num_attention_heads a = hidden_act a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = initializer_range a = layer_norm_eps a = use_entity_aware_attention a = classifier_dropout
330
0
from __future__ import annotations from math import pow, sqrt def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> dict[str, float]: """simple docstring""" if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if resistance == 0: return {"resistance": sqrt(pow(snake_case_, 2 ) - pow(snake_case_, 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(snake_case_, 2 ) - pow(snake_case_, 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(snake_case_, 2 ) + pow(snake_case_, 2 ) )} else: raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
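# Example calls for the solver above, assuming the function's original name was
# electrical_impedance; exactly one argument must be 0 and is recovered from
# Z^2 = R^2 + X^2:
#
#   electrical_impedance(3, 4, 0)  ->  {'impedance': 5.0}
#   electrical_impedance(0, 4, 5)  ->  {'resistance': 3.0}
#   electrical_impedance(3, 0, 5)  ->  {'reactance': 4.0}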
356
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": UpperCamelCase__ : Optional[int] = pd.read_csv("""sample_data.csv""", header=None) UpperCamelCase__ : Tuple = df.shape[:1][0] # If you're using some other dataset input the target column UpperCamelCase__ : List[Any] = df.iloc[:, 1:2] UpperCamelCase__ : Union[str, Any] = actual_data.values.reshape(len_data, 1) UpperCamelCase__ : List[Any] = MinMaxScaler().fit_transform(actual_data) UpperCamelCase__ : Optional[Any] = 10 UpperCamelCase__ : int = 5 UpperCamelCase__ : List[str] = 20 UpperCamelCase__ : Optional[int] = len_data - periods * look_back UpperCamelCase__ : Union[str, Any] = actual_data[:division] UpperCamelCase__ : str = actual_data[division - look_back :] UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = [], [] UpperCamelCase__ , UpperCamelCase__ : str = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) UpperCamelCase__ : List[str] = np.array(train_x) UpperCamelCase__ : Optional[Any] = np.array(test_x) UpperCamelCase__ : Tuple = np.array([list(i.ravel()) for i in train_y]) UpperCamelCase__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y]) UpperCamelCase__ : Union[str, Any] = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") UpperCamelCase__ : Tuple = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) UpperCamelCase__ : Tuple = model.predict(x_test)
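# Shape check for the sliding-window construction above: with look_back=10 and
# forward_days=5, each sample maps 10 past values to 5 future values.
import numpy as np

series = np.arange(100, dtype=float).reshape(-1, 1)
look_back, forward_days = 10, 5
n = len(series) - forward_days - look_back + 1
x = np.array([series[i : i + look_back] for i in range(n)])
y = np.array([series[i + look_back : i + look_back + forward_days] for i in range(n)])
print(x.shape, y.shape)  # (86, 10, 1) (86, 5, 1)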
330
0
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowerCamelCase_ ( a_ ): def __init__( self : str ,__lowerCamelCase : pyspark.sql.DataFrame ,__lowerCamelCase : Optional[NamedSplit] = None ,__lowerCamelCase : Optional[Features] = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : str = None ,__lowerCamelCase : bool = False ,__lowerCamelCase : str = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : str = "arrow" ,**__lowerCamelCase : Optional[Any] ,): '''simple docstring''' super().__init__( split=__lowerCamelCase ,features=__lowerCamelCase ,cache_dir=__lowerCamelCase ,keep_in_memory=__lowerCamelCase ,streaming=__lowerCamelCase ,**__lowerCamelCase ,) a = load_from_cache_file a = file_format a = Spark( df=__lowerCamelCase ,features=__lowerCamelCase ,cache_dir=__lowerCamelCase ,working_dir=__lowerCamelCase ,**__lowerCamelCase ,) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) a = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=__lowerCamelCase ,file_format=self._file_format ,) return self.builder.as_dataset(split=self.split )
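# Hedged usage sketch for the reader above; it needs a live SparkSession and a recent
# `datasets` release, so this is illustrative rather than a test. Dataset.from_spark is
# the public entry point that delegates to this class.
from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
ds = Dataset.from_spark(df)
print(ds[0])  # e.g. {'text': 'hello'}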
357
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple: """simple docstring""" a = FileLock(str(tmpdir / '''foo.lock''' ) ) a = FileLock(str(tmpdir / '''foo.lock''' ) ) a = 0.01 with locka.acquire(): with pytest.raises(snake_case_ ): a = time.time() locka.acquire(snake_case_ ) assert time.time() - _start > timeout def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" a = '''a''' * 1_0_0_0 + '''.lock''' a = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(snake_case_ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 a = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(snake_case_ ): locka.acquire(0 )
330
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCamelCase__ : Union[str, Any] = { """configuration_altclip""": [ """ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AltCLIPConfig""", """AltCLIPTextConfig""", """AltCLIPVisionConfig""", ], """processing_altclip""": ["""AltCLIPProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Any = [ """ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """AltCLIPPreTrainedModel""", """AltCLIPModel""", """AltCLIPTextModel""", """AltCLIPVisionModel""", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys UpperCamelCase__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
358
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : Dict = { """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""", # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'vit_mae' def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = decoder_num_attention_heads a = decoder_hidden_size a = decoder_num_hidden_layers a = decoder_intermediate_size a = mask_ratio a = norm_pix_loss
330
0
import logging import os import threading import time try: import warnings except ImportError: UpperCamelCase__ : List[Any] = None try: import msvcrt except ImportError: UpperCamelCase__ : Optional[Any] = None try: import fcntl except ImportError: UpperCamelCase__ : Optional[Any] = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: UpperCamelCase__ : List[str] = OSError # Data # ------------------------------------------------ UpperCamelCase__ : str = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] UpperCamelCase__ : str = """3.0.12""" UpperCamelCase__ : Optional[Any] = None def SCREAMING_SNAKE_CASE__ ( ) -> int: """simple docstring""" global _logger a = _logger or logging.getLogger(__name__ ) return _logger class lowerCamelCase_ ( a_ ): def __init__( self : Optional[Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = lock_file return None def __str__( self : List[Any] ): '''simple docstring''' a = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class lowerCamelCase_ : def __init__( self : Dict ,__lowerCamelCase : Tuple ): '''simple docstring''' a = lock return None def __enter__( self : int ): '''simple docstring''' return self.lock def __exit__( self : Dict ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[Any] ): '''simple docstring''' self.lock.release() return None class lowerCamelCase_ : def __init__( self : str ,__lowerCamelCase : List[str] ,__lowerCamelCase : Union[str, Any]=-1 ,__lowerCamelCase : Optional[int]=None ): '''simple docstring''' a = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long a = self.hash_filename_if_too_long(__lowerCamelCase ,__lowerCamelCase ) # The path to the lock file. a = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. a = None # The default timeout value. a = timeout # We use this lock primarily for the lock counter. a = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. a = 0 return None @property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' return self._lock_file @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' return self._timeout @timeout.setter def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = float(__lowerCamelCase ) return None def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' raise NotImplementedError() def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' raise NotImplementedError() @property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' return self._lock_file_fd is not None def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Dict=None ,__lowerCamelCase : str=0.05 ): '''simple docstring''' if timeout is None: a = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 a = id(self ) a = self._lock_file a = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__lowerCamelCase ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: a = max(0 ,self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int=False ): '''simple docstring''' with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: a = id(self ) a = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() a = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : Union[str, Any] ): '''simple docstring''' self.acquire() return self def __exit__( self : Optional[int] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' self.release() return None def __del__( self : Dict ): '''simple docstring''' self.release(force=__lowerCamelCase ) return None def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : str ,__lowerCamelCase : int ): '''simple docstring''' a = os.path.basename(__lowerCamelCase ) if len(__lowerCamelCase ) > max_length and max_length > 0: a = os.path.dirname(__lowerCamelCase ) a = str(hash(__lowerCamelCase ) ) a = filename[: max_length - len(__lowerCamelCase ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__lowerCamelCase ,__lowerCamelCase ) else: return path class lowerCamelCase_ ( a_ ): def __init__( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Any=-1 ,__lowerCamelCase : Tuple=None ): '''simple docstring''' from .file_utils import relative_to_absolute_path super().__init__(__lowerCamelCase ,timeout=__lowerCamelCase ,max_filename_length=__lowerCamelCase ) a = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: a = os.open(self._lock_file ,__lowerCamelCase ) except OSError: pass else: try: msvcrt.locking(__lowerCamelCase ,msvcrt.LK_NBLCK ,1 ) except OSError: os.close(__lowerCamelCase ) else: a = fd return None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = self._lock_file_fd a = None msvcrt.locking(__lowerCamelCase ,msvcrt.LK_UNLCK ,1 ) os.close(__lowerCamelCase ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class lowerCamelCase_ ( a_ ): def __init__( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Tuple=-1 ,__lowerCamelCase : Tuple=None ): '''simple docstring''' a = os.statvfs(os.path.dirname(__lowerCamelCase ) ).f_namemax super().__init__(__lowerCamelCase ,timeout=__lowerCamelCase ,max_filename_length=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = os.O_RDWR | os.O_CREAT | os.O_TRUNC a = os.open(self._lock_file ,__lowerCamelCase ) try: fcntl.flock(__lowerCamelCase ,fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__lowerCamelCase ) else: a = fd return None def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = self._lock_file_fd a = None fcntl.flock(__lowerCamelCase ,fcntl.LOCK_UN ) os.close(__lowerCamelCase ) return None class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: a = os.open(self._lock_file ,__lowerCamelCase ) except OSError: pass else: a = fd return None def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' os.close(self._lock_file_fd ) a = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None UpperCamelCase__ : Any = None if msvcrt: UpperCamelCase__ : Tuple = WindowsFileLock elif fcntl: UpperCamelCase__ : Any = UnixFileLock else: UpperCamelCase__ : Union[str, Any] = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
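# Minimal usage sketch for the lock classes above: acquire/release are re-entrant per
# object (via the lock counter), and the context manager releases on exit. The lock file
# path is arbitrary; a directory is included so UnixFileLock's statvfs call resolves.
import os
import tempfile

lock = FileLock(os.path.join(tempfile.gettempdir(), "demo.txt.lock"), timeout=1)
with lock:
    with lock:                 # nested acquire bumps the counter, lock stays held
        assert lock.is_locked
assert not lock.is_locked      # fully released once the outermost block exits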
359
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]: """simple docstring""" stooge(snake_case_, 0, len(snake_case_ ) - 1 ) return arr def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[Any]: """simple docstring""" if i >= h: return # If first element is smaller than the last then swap them if arr[i] > arr[h]: a , a = arr[h], arr[i] # If there are more than 2 elements in the array if h - i + 1 > 2: a = (int)((h - i + 1) / 3 ) # Recursively sort first 2/3 elements stooge(snake_case_, snake_case_, (h - t) ) # Recursively sort last 2/3 elements stooge(snake_case_, i + t, (snake_case_) ) # Recursively sort first 2/3 elements stooge(snake_case_, snake_case_, (h - t) ) if __name__ == "__main__": UpperCamelCase__ : Dict = input("""Enter numbers separated by a comma:\n""").strip() UpperCamelCase__ : Optional[int] = [int(item) for item in user_input.split(""",""")] print(stooge_sort(unsorted))
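# Stooge sort runs in O(n^(log 3 / log 1.5)) ≈ O(n^2.71), so it is purely a teaching
# example. A non-interactive check of the function above:
#
#   stooge_sort([18, 1, 0, -7, -1, 2])  ->  [-7, -1, 0, 1, 2, 18]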
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Returns every prime up to and including `num`, via the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Every multiple of p from p*p onwards is composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
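# Quick check (illustrative): the primes up to 30.
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]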
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCamelCase__ : Union[str, Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCamelCase__ : str = { """jukebox""": 512, } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = version a = max_n_lyric_tokens a = n_genres with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: a = oov.replace(r'''\-\'''' ,r'''\-+\'''' ) a = regex.compile(__lowerCamelCase ) a = {v: k for k, v in self.artists_encoder.items()} a = {v: k for k, v in self.genres_encoder.items()} a = {v: k for k, v in self.lyrics_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists] for genres in range(len(__lowerCamelCase ) ): a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]] a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str] ): '''simple docstring''' return list(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' a , a , a = self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = self._tokenize(__lowerCamelCase ) return artist, genre, lyrics def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": a = artists[idx].lower() a = [genres[idx].lower()] else: a = self._normalize(artists[idx] ) + '''.v2''' a = [ self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )} a = 0 a = len(__lowerCamelCase ) + 1 a = self.vocab a = {v: k for k, v in self.vocab.items()} a = '''''' else: a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) a = self._run_strip_accents(__lowerCamelCase ) a = lyrics.replace('''\\''' ,'''\n''' ) a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], [] return artists, genres, lyrics def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ): '''simple docstring''' a = unicodedata.normalize('''NFD''' ,__lowerCamelCase ) a = [] for char in text: a = unicodedata.category(__lowerCamelCase ) if cat == "Mn": continue output.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = ( [chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )] + ['''.'''] ) a = frozenset(__lowerCamelCase ) a = re.compile(r'''_+''' ) a = ''''''.join([c if c in accepted else '''_''' for c 
in text.lower()] ) a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' ) return text def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ): '''simple docstring''' return " ".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): a = TensorType(__lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf a = tf.constant a = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch a = torch.tensor a = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 a = jnp.array a = _is_jax else: a = np.asarray a = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: a = [inputs] if not is_tensor(__lowerCamelCase ): a = as_tensor(__lowerCamelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ): '''simple docstring''' a = [0, 0, 0] a = [artist] * len(self.version ) a = [genres] * len(self.version ) a , a , a = self.tokenize(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = [-INFINITY] * len(full_tokens[-1] ) a = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def SCREAMING_SNAKE_CASE_ ( self : 
Any ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : str ): '''simple docstring''' a = self.artists_decoder.get(__lowerCamelCase ) a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index] a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
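# A minimal usage sketch (an assumption: upstream this class is exported as
# `JukeboxTokenizer`, with the `__call__` signature defined above; the checkpoint
# name is illustrative):
#
#     from transformers import JukeboxTokenizer
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     encoding = tokenizer(artist="Zac Brown Band", genres="Country",
#                          lyrics="I met a traveller from an antique land")
#     encoding["input_ids"]  # one tensor per model version, conditioned on artist/genre/lyrics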
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluates the polynomial term by term, computing each power of x separately."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluates the same polynomial with Horner's rule: one multiply-add per coefficient."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5*x^2 + 9.3*x^3 + 7*x^4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
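# Worked example (illustrative): for coefficients (0.0, 0.0, 5.0, 9.3, 7.0) at x = 10.0,
# f(10) = 5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000 = 79800. Horner's rule
# reaches the same value with one multiply-add per coefficient:
# ((7*10 + 9.3)*10 + 5)*10**2 = 79800.
assert abs(evaluate_poly((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79_800.0) < 1e-6
assert abs(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0) - 79_800.0) < 1e-6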
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of the even Fibonacci numbers that do not exceed `n`."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
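# Sanity checks (illustrative): only every third Fibonacci number is even
# (2, 8, 34, 144, ...), so up to 10 the sum is 2 + 8 = 10 and up to 34 it is 44.
assert solution(10) == 10
assert solution(34) == 44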
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) UpperCamelCase__ : Optional[Any] = """bert-base-cased""" UpperCamelCase__ : int = """fp16""" UpperCamelCase__ : str = """bf16""" UpperCamelCase__ : List[Any] = [FPaa, BFaa] @require_fsdp @require_cuda class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' super().setUp() a = dict( ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = F"""{i + 1}""" a = strategy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = prefetch_policy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = state_dict_type with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = AutoModel.from_pretrained(__lowerCamelCase ) for policy in FSDP_AUTO_WRAP_POLICY: a = self.dist_env.copy() a = policy if policy == "TRANSFORMER_BASED_WRAP": a = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": a = '''2000''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) a = self.dist_env.copy() a = '''TRANSFORMER_BASED_WRAP''' a = '''T5Layer''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCamelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertTrue('''Could not find the transformer layer class to wrap in 
the model.''' in str(cm.exception ) ) a = self.dist_env.copy() a = '''SIZE_BASED_WRAP''' a = '''0''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: a = self.dist_env.copy() a = mp_dtype with mockenv_context(**__lowerCamelCase ): a = Accelerator() if mp_dtype == "fp16": a = torch.floataa elif mp_dtype == "bf16": a = torch.bfloataa a = MixedPrecision(param_dtype=__lowerCamelCase ,reduce_dtype=__lowerCamelCase ,buffer_dtype=__lowerCamelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCamelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,__lowerCamelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: a = self.dist_env.copy() a = str(__lowerCamelCase ).lower() with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCamelCase ) ) @require_fsdp @require_multi_gpu @slow class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' super().setUp() a = 0.82 a = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] a = { '''multi_gpu_fp16''': 32_00, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00, '''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } a = 1_60 a = 1_60 a = inspect.getfile(accelerate.test_utils ) a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_performance.py''' ) a = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: a = cmd.copy() for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(__lowerCamelCase ): a = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue a = len(__lowerCamelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: a = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) a = cmd_config[:-1] a = os.path.join(self.tmpdir ,'''epoch_0''' ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): a = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in spec: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: 
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
"""simple docstring""" import re def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" if len(re.findall('''[ATCG]''', snake_case_ ) ) != len(snake_case_ ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """A weighted, undirected graph; edges are stored with their endpoints ordered."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Builds a minimum spanning tree, starting from the smallest vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # An edge crosses the cut if exactly one endpoint is in the subgraph
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Project Euler 107: the maximum saving from replacing the network by its MST."""
    script_directory = os.path.abspath(os.path.dirname(__file__))
    network_file = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
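# Tiny worked example (illustrative): a triangle 0-1-2 with weights 2, 3 and 5.
# Prim's algorithm keeps edges (0, 1) and (1, 2), so the saving is 10 - 5 = 5.
demo_graph = Graph({0, 1, 2}, {(0, 1): 2, (1, 2): 3, (0, 2): 5})
demo_subgraph = demo_graph.prims_algorithm()
assert sum(demo_graph.edges.values()) - sum(demo_subgraph.edges.values()) == 5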
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Optional[int]: """simple docstring""" a = args.log_outputs a = '''_'''.join(args.dataset.split('''/''' ) + [args.config, args.split] ) # load metric a = load_metric('''wer''' ) a = load_metric('''cer''' ) # compute metrics a = wer.compute(references=result['''target'''], predictions=result['''prediction'''] ) a = cer.compute(references=result['''target'''], predictions=result['''prediction'''] ) # print & log results a = f"""WER: {wer_result}\nCER: {cer_result}""" print(snake_case_ ) with open(f"""{dataset_id}_eval_results.txt""", '''w''' ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: a = f"""log_{dataset_id}_predictions.txt""" a = f"""log_{dataset_id}_targets.txt""" with open(snake_case_, '''w''' ) as p, open(snake_case_, '''w''' ) as t: # mapping function to write output def write_to_file(snake_case_, snake_case_ ): p.write(f"""{i}""" + '''\n''' ) p.write(batch['''prediction'''] + '''\n''' ) t.write(f"""{i}""" + '''\n''' ) t.write(batch['''target'''] + '''\n''' ) result.map(snake_case_, with_indices=snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" a = '''[,?.!\-\;\:"“%‘”�—’…–]''' # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training a = re.sub(snake_case_, '''''', text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! a = ['''\n\n''', '''\n''', ''' ''', ''' '''] for t in token_sequences_to_ignore: a = ''' '''.join(text.split(snake_case_ ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[Any]: """simple docstring""" a = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor a = AutoFeatureExtractor.from_pretrained(args.model_id ) a = feature_extractor.sampling_rate # resample audio a = dataset.cast_column('''audio''', Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: a = 0 if torch.cuda.is_available() else -1 a = pipeline('''automatic-speech-recognition''', model=args.model_id, device=args.device ) # map function to decode audio def map_to_pred(snake_case_ ): a = asr( batch['''audio''']['''array'''], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s ) a = prediction['''text'''] a = normalize_text(batch['''sentence'''] ) return batch # run inference on all examples a = dataset.map(snake_case_, remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_, snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) UpperCamelCase__ : Dict = parser.parse_args() main(args)
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring UpperCamelCase__ : List[Any] = """RegNetConfig""" # Base docstring UpperCamelCase__ : Dict = """facebook/regnet-y-040""" UpperCamelCase__ : int = [1, 1_088, 7, 7] # Image classification docstring UpperCamelCase__ : Optional[Any] = """facebook/regnet-y-040""" UpperCamelCase__ : Dict = """tabby, tabby cat""" UpperCamelCase__ : Dict = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) a = ACTaFN[activation] if activation is not None else tf.identity def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ): '''simple docstring''' a = self.convolution(self.padding(__lowerCamelCase ) ) a = self.normalization(__lowerCamelCase ) a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config.num_channels a = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = shape_list(__lowerCamelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) ) a = self.embedder(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ): '''simple docstring''' return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) a = [ tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ), ] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = self.pooler(__lowerCamelCase ) for layer_module in self.attention: a = layer_module(__lowerCamelCase ) a = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer a = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ), *[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ): '''simple docstring''' for layer_module in self.layers: a = layer_module(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) ) a = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in 
enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ): '''simple docstring''' a = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a = hidden_states + (hidden_state,) a = stage_module(__lowerCamelCase ) if output_hidden_states: a = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): SCREAMING_SNAKE_CASE_ = RegNetConfig def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' ) a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) @unpack_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase ) a = self.encoder( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = encoder_outputs[0] a = self.pooler(__lowerCamelCase ) # Change to NCHW output format have uniformity in the modules a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = RegNetConfig SCREAMING_SNAKE_CASE_ = 'regnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} UpperCamelCase__ : Union[str, Any] = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase__ : List[str] = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , a_ , ) class lowerCamelCase_ ( a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , a_ , ) class lowerCamelCase_ ( a_ , a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = config.num_labels a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) # classification head a = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier[0](__lowerCamelCase ) a = self.classifier[1](__lowerCamelCase ) a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
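# A minimal usage sketch (an assumption: upstream these classes are exported as
# `TFRegNetModel` / `TFRegNetForImageClassification`; the checkpoint matches the
# docstring constants above):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")  # `image` is a PIL image
#     logits = model(**inputs).logits                        # (batch_size, num_labels)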
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()

        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from typing import List

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
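# A minimal usage sketch (an assumption, mirroring the standard transformers pattern;
# `EfficientFormerModel` is the companion model class upstream):
#
#     from transformers import EfficientFormerConfig, EfficientFormerModel
#
#     config = EfficientFormerConfig()      # defaults describe efficientformer-l1-300
#     model = EfficientFormerModel(config)  # randomly initialised from the config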
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merges two minterm strings that differ in exactly one position, else returns False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merges minterms; the terms that never merge are the prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is not False:
                    # Both terms are covered by the combined implicant
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Converts each minterm to a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if implicant `string1` covers minterm `string2` (differs only at the '_' positions)."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Picks the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # A column with a single 1 marks an essential prime implicant
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Builds the chart: chart[i][j] == 1 if prime implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
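# Small worked example (illustrative): "0010" and "0110" differ only in the second
# bit, so they merge into "0_10"; terms differing in more than one bit do not merge.
assert compare_string("0010", "0110") == "0_10"
assert compare_string("0110", "1101") is False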
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase__ : Any = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCamelCase__ : Optional[Any] = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCamelCase__ : Optional[Any] = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : List[str] = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : Optional[int] = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for tf_name, hf_name in patterns: a = k.replace(snake_case_, snake_case_ ) return k def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" a = BigBirdPegasusConfig(**snake_case_ ) a = BigBirdPegasusForConditionalGeneration(snake_case_ ) a = torch_model.state_dict() a = {} # separating decoder weights a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = DECODER_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = REMAINING_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a = mapping['''model.embed_positions.weight'''] a = mapping.pop('''model.embed_positions.weight''' ) a , a = torch_model.load_state_dict(snake_case_, strict=snake_case_ ) a = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict: """simple docstring""" a = tf.train.list_variables(snake_case_ ) a = {} a = ['''global_step'''] for name, shape in tqdm(snake_case_, desc='''converting tf checkpoint to dict''' ): a = any(pat in name for pat in ignore_name ) if skip_key: continue a = tf.train.load_variable(snake_case_, snake_case_ ) a = array return tf_weights def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int: """simple docstring""" a = get_tf_weights_as_numpy(snake_case_ ) a = convert_bigbird_pegasus(snake_case_, snake_case_ ) torch_model.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCamelCase__ : int = parser.parse_args() UpperCamelCase__ : Tuple = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
UpperCamelCase__ : dict[str, float] = { "joule": 1.0, "kilojoule": 1_000, "megajoule": 1_000_000, "gigajoule": 1_000_000_000, "wattsecond": 1.0, "watthour": 3_600, "kilowatthour": 3_600_000, "newtonmeter": 1.0, "calorie_nutr": 4_186.8, "kilocalorie_nutr": 4_186_800.00, "electronvolt": 1.602176634E-19, "britishthermalunit_it": 1_055.05_585, "footpound": 1.3_5_5_8_1_8, } def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> float: """simple docstring""" if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION: a = ( f"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n""" f"""Valid values are: {", ".join(snake_case_ )}""" ) raise ValueError(snake_case_ ) return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type] if __name__ == "__main__": import doctest doctest.testmod()
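# A minimal usage sketch for the converter above. The body computes
# value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type], so assuming the
# conventional signature energy_conversion(from_type, to_type, value):
#   >>> energy_conversion("joule", "kilojoule", 1)
#   0.001
#   >>> energy_conversion("wattsecond", "joule", 2.5)
#   2.5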
import re def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" if len(re.findall('''[ATCG]''', snake_case_ ) ) != len(snake_case_ ): raise ValueError('''Invalid Strand''' ) return snake_case_.translate(snake_case_.maketrans('''ATCG''', '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
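# Usage sketch: the function validates that the strand contains only A/T/C/G and then
# returns the complement via str.translate, e.g. SCREAMING_SNAKE_CASE__("ATCG") -> "TAGC",
# while SCREAMING_SNAKE_CASE__("ATXG") raises ValueError("Invalid Strand").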
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple: """simple docstring""" a = FileLock(str(tmpdir / '''foo.lock''' ) ) a = FileLock(str(tmpdir / '''foo.lock''' ) ) a = 0.01 with locka.acquire(): with pytest.raises(snake_case_ ): a = time.time() locka.acquire(snake_case_ ) assert time.time() - _start > timeout def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" a = '''a''' * 1_0_0_0 + '''.lock''' a = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(snake_case_ ) assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5 a = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(snake_case_ ): locka.acquire(0 )
from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count += 1 a = '''_''' if count > 1: return False else: return "".join(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]: """simple docstring""" a = [] while True: a = ['''$'''] * len(snake_case_ ) a = [] for i in range(len(snake_case_ ) ): for j in range(i + 1, len(snake_case_ ) ): a = compare_string(binary[i], binary[j] ) if k is False: a = '''*''' a = '''*''' temp.append('''X''' ) for i in range(len(snake_case_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case_ ) == 0: return pi a = list(set(snake_case_ ) ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] for minterm in minterms: a = '''''' for _ in range(snake_case_ ): a = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case_ ) return temp def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] a = [0] * len(snake_case_ ) for i in range(len(chart[0] ) ): a = 0 a = -1 for j in range(len(snake_case_ ) ): if chart[j][i] == 1: count += 1 a = j if count == 1: a = 1 for i in range(len(snake_case_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case_ ) ): a = 0 temp.append(prime_implicants[i] ) while True: a = 0 a = -1 a = 0 for i in range(len(snake_case_ ) ): a = chart[i].count(1 ) if count_n > max_n: a = count_n a = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case_ ) ): a = 0 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]: """simple docstring""" a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )] for i in range(len(snake_case_ ) ): a = prime_implicants[i].count('''_''' ) for j in range(len(snake_case_ ) ): if is_for_table(prime_implicants[i], binary[j], snake_case_ ): a = 1 return chart def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" a = int(input('''Enter the no. of variables\n''' ) ) a = [ float(snake_case_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] a = decimal_to_binary(snake_case_, snake_case_ ) a = check(snake_case_ ) print('''Prime Implicants are:''' ) print(snake_case_ ) a = prime_implicant_chart(snake_case_, snake_case_ ) a = selection(snake_case_, snake_case_ ) print('''Essential Prime Implicants are:''' ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
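# Worked example of the Quine-McCluskey flow implemented above (helper names as used in
# this file's main()): with 3 variables and minterms [1, 5, 7], decimal_to_binary
# produces ['001', '101', '111']; check() merges pairs differing in exactly one bit
# into the prime implicants '_01' (covers minterms 1 and 5) and '1_1' (covers 5 and 7);
# both are essential, so selection() over the prime-implicant chart returns both.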
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class lowerCamelCase_ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Dict ,__lowerCamelCase : List[str] ): '''simple docstring''' return F"""gaussian_noise_s={seed}_shape={"_".join([str(__lowerCamelCase ) for s in shape] )}.npy""" def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' super().tearDown() gc.collect() def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : Tuple=0 ,__lowerCamelCase : Any=(4, 4, 64, 64) ,__lowerCamelCase : int=False ): '''simple docstring''' a = jnp.bfloataa if fpaa else jnp.floataa a = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase ,__lowerCamelCase ) ) ,dtype=__lowerCamelCase ) return image def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" ): '''simple docstring''' a = jnp.bfloataa if fpaa else jnp.floataa a = '''bf16''' if fpaa else None a , a = FlaxUNetaDConditionModel.from_pretrained( __lowerCamelCase ,subfolder='''unet''' ,dtype=__lowerCamelCase ,revision=__lowerCamelCase ) return model, params def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Tuple=0 ,__lowerCamelCase : int=(4, 77, 7_68) ,__lowerCamelCase : Tuple=False ): '''simple docstring''' a = jnp.bfloataa if fpaa else jnp.floataa a = jnp.array(load_hf_numpy(self.get_file_format(__lowerCamelCase ,__lowerCamelCase ) ) ,dtype=__lowerCamelCase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]], [17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]], [8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]], [3, 10_00, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Tuple ): '''simple docstring''' a , a = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' ,fpaa=__lowerCamelCase ) a = self.get_latents(__lowerCamelCase ,fpaa=__lowerCamelCase ) a = self.get_encoder_hidden_states(__lowerCamelCase ,fpaa=__lowerCamelCase ) a = model.apply( {'''params''': params} ,__lowerCamelCase ,jnp.array(__lowerCamelCase ,dtype=jnp.intaa ) ,encoder_hidden_states=__lowerCamelCase ,).sample assert sample.shape == latents.shape a = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa ) a = jnp.array(__lowerCamelCase ,dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(__lowerCamelCase ,__lowerCamelCase ,atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]], [17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]], [8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]], [3, 10_00, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]], # fmt: on ] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : 
Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : List[str] ): '''simple docstring''' a , a = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' ,fpaa=__lowerCamelCase ) a = self.get_latents(__lowerCamelCase ,shape=(4, 4, 96, 96) ,fpaa=__lowerCamelCase ) a = self.get_encoder_hidden_states(__lowerCamelCase ,shape=(4, 77, 10_24) ,fpaa=__lowerCamelCase ) a = model.apply( {'''params''': params} ,__lowerCamelCase ,jnp.array(__lowerCamelCase ,dtype=jnp.intaa ) ,encoder_hidden_states=__lowerCamelCase ,).sample assert sample.shape == latents.shape a = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) ,dtype=jnp.floataa ) a = jnp.array(__lowerCamelCase ,dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(__lowerCamelCase ,__lowerCamelCase ,atol=1e-2 )
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING UpperCamelCase__ : List[str] = logging.get_logger(__name__) @add_end_docstrings(a_ ) class lowerCamelCase_ ( a_ ): def __init__( self : int ,*__lowerCamelCase : str ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(*__lowerCamelCase ,**__lowerCamelCase ) requires_backends(self ,'''vision''' ) self.check_model_type(__lowerCamelCase ) def __call__( self : int ,__lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]] ,**__lowerCamelCase : str ): '''simple docstring''' return super().__call__(__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,**__lowerCamelCase : Dict ): '''simple docstring''' return {}, {}, {} def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = load_image(__lowerCamelCase ) a = image.size a = self.image_processor(images=__lowerCamelCase ,return_tensors=self.framework ) return model_inputs def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = self.model(**__lowerCamelCase ) return model_outputs def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = model_outputs.predicted_depth a = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) ,size=self.image_size[::-1] ,mode='''bicubic''' ,align_corners=__lowerCamelCase ) a = prediction.squeeze().cpu().numpy() a = (output * 2_55 / np.max(__lowerCamelCase )).astype('''uint8''' ) a = Image.fromarray(__lowerCamelCase ) a = {} a = predicted_depth a = depth return output_dict
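# Typical use goes through the pipeline factory rather than instantiating this class
# directly; a minimal sketch (checkpoint selection left to the library defaults):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation")
#   result = depth_estimator("path/or/url/to/image.jpg")
#   result["depth"]            # PIL image built in the postprocess step above
#   result["predicted_depth"]  # the raw depth tensor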
import warnings from ...utils import logging from .image_processing_yolos import YolosImageProcessor UpperCamelCase__ = logging.get_logger(__name__) class lowerCamelCase_ ( a_ ): def __init__( self : Union[str, Any] ,*__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[int] ): '''simple docstring''' warnings.warn( '''The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use YolosImageProcessor instead.''' ,__lowerCamelCase ,) super().__init__(*__lowerCamelCase ,**__lowerCamelCase )
from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a_ ) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = field(default='language-modeling' , metadata={'include_in_asdict_even_if_is_default': True} ) SCREAMING_SNAKE_CASE_ = Features({'text': Value('string' )} ) SCREAMING_SNAKE_CASE_ = Features({} ) SCREAMING_SNAKE_CASE_ = "text" @property def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' return {self.text_column: "text"}
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring UpperCamelCase__ : List[Any] = """RegNetConfig""" # Base docstring UpperCamelCase__ : Dict = """facebook/regnet-y-040""" UpperCamelCase__ : int = [1, 1_088, 7, 7] # Image classification docstring UpperCamelCase__ : Optional[Any] = """facebook/regnet-y-040""" UpperCamelCase__ : Dict = """tabby, tabby cat""" UpperCamelCase__ : Dict = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) a = ACTaFN[activation] if activation is not None else tf.identity def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ): '''simple docstring''' a = self.convolution(self.padding(__lowerCamelCase ) ) a = self.normalization(__lowerCamelCase ) a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config.num_channels a = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = shape_list(__lowerCamelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) ) a = self.embedder(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ): '''simple docstring''' return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) a = [ tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ), ] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = self.pooler(__lowerCamelCase ) for layer_module in self.attention: a = layer_module(__lowerCamelCase ) a = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer a = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ), *[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ): '''simple docstring''' for layer_module in self.layers: a = layer_module(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) ) a = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in 
enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ): '''simple docstring''' a = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a = hidden_states + (hidden_state,) a = stage_module(__lowerCamelCase ) if output_hidden_states: a = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): SCREAMING_SNAKE_CASE_ = RegNetConfig def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' ) a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) @unpack_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase ) a = self.encoder( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = encoder_outputs[0] a = self.pooler(__lowerCamelCase ) # Change to NCHW output format have uniformity in the modules a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = RegNetConfig SCREAMING_SNAKE_CASE_ = 'regnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} UpperCamelCase__ : Union[str, Any] = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase__ : List[str] = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , a_ , ) class lowerCamelCase_ ( a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , a_ , ) class lowerCamelCase_ ( a_ , a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = config.num_labels a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) # classification head a = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier[0](__lowerCamelCase ) a = self.classifier[1](__lowerCamelCase ) a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : Union[str, Any] = { """hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'yolos' def __init__( self : Union[str, Any] ,__lowerCamelCase : int=7_68 ,__lowerCamelCase : Dict=12 ,__lowerCamelCase : Union[str, Any]=12 ,__lowerCamelCase : List[Any]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : int=0.0 ,__lowerCamelCase : str=0.0 ,__lowerCamelCase : Optional[Any]=0.02 ,__lowerCamelCase : int=1e-12 ,__lowerCamelCase : Any=[5_12, 8_64] ,__lowerCamelCase : Tuple=16 ,__lowerCamelCase : int=3 ,__lowerCamelCase : Tuple=True ,__lowerCamelCase : Optional[int]=1_00 ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : List[str]=False ,__lowerCamelCase : int=1 ,__lowerCamelCase : List[Any]=5 ,__lowerCamelCase : Optional[int]=2 ,__lowerCamelCase : int=5 ,__lowerCamelCase : str=2 ,__lowerCamelCase : Tuple=0.1 ,**__lowerCamelCase : List[Any] ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = initializer_range a = layer_norm_eps a = image_size a = patch_size a = num_channels a = qkv_bias a = num_detection_tokens a = use_mid_position_embeddings a = auxiliary_loss # Hungarian matcher a = class_cost a = bbox_cost a = giou_cost # Loss coefficients a = bbox_loss_coefficient a = giou_loss_coefficient a = eos_coefficient class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = version.parse('1.11' ) @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' return 1e-4 @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return 12
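# A short usage sketch (assuming this config is exposed as YolosConfig in transformers):
#   from transformers import YolosConfig
#   config = YolosConfig.from_pretrained("hustvl/yolos-small")  # checkpoint from the map above
#   config.num_detection_tokens  # 100 by default, per the __init__ defaults above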
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class lowerCamelCase_ : SCREAMING_SNAKE_CASE_ = BlenderbotSmallConfig SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = 'gelu' def __init__( self : Any ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=13 ,__lowerCamelCase : str=7 ,__lowerCamelCase : Optional[int]=True ,__lowerCamelCase : Tuple=False ,__lowerCamelCase : Tuple=99 ,__lowerCamelCase : Dict=32 ,__lowerCamelCase : Dict=2 ,__lowerCamelCase : Optional[Any]=4 ,__lowerCamelCase : Union[str, Any]=37 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : Optional[int]=0.1 ,__lowerCamelCase : str=20 ,__lowerCamelCase : List[str]=2 ,__lowerCamelCase : Dict=1 ,__lowerCamelCase : List[Any]=0 ,): '''simple docstring''' a = parent a = batch_size a = seq_length a = is_training a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = eos_token_id a = pad_token_id a = bos_token_id def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) a = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 ) a = tf.concat([input_ids, eos_tensor] ,axis=1 ) a = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) a = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) a = prepare_blenderbot_small_inputs_dict(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Any ,__lowerCamelCase : List[str] ): '''simple docstring''' a = TFBlenderbotSmallModel(config=__lowerCamelCase ).get_decoder() a = inputs_dict['''input_ids'''] a = input_ids[:1, :] a = inputs_dict['''attention_mask'''][:1, :] a = inputs_dict['''head_mask'''] a = 1 # first forward pass a = model(__lowerCamelCase ,attention_mask=__lowerCamelCase ,head_mask=__lowerCamelCase ,use_cache=__lowerCamelCase ) a , a = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a = ids_tensor((self.batch_size, 3) ,config.vocab_size ) a = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta ) # append to next input_ids and a = tf.concat([input_ids, next_tokens] ,axis=-1 ) a = tf.concat([attention_mask, next_attn_mask] ,axis=-1 ) a = model(__lowerCamelCase 
,attention_mask=__lowerCamelCase )[0] a = model(__lowerCamelCase ,attention_mask=__lowerCamelCase ,past_key_values=__lowerCamelCase )[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] ) # select random slice a = int(ids_tensor((1,) ,output_from_past.shape[-1] ) ) a = output_from_no_past[:, -3:, random_slice_idx] a = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__lowerCamelCase ,__lowerCamelCase ,rtol=1e-3 ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=None, snake_case_=None, snake_case_=None, ) -> Any: """simple docstring""" if attention_mask is None: a = tf.cast(tf.math.not_equal(snake_case_, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: a = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: a = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: a = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowerCamelCase_ ( a_ , a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) SCREAMING_SNAKE_CASE_ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () SCREAMING_SNAKE_CASE_ = ( { 'conversational': TFBlenderbotSmallForConditionalGeneration, 'feature-extraction': TFBlenderbotSmallModel, 'summarization': TFBlenderbotSmallForConditionalGeneration, 'text2text-generation': TFBlenderbotSmallForConditionalGeneration, 'translation': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = TFBlenderbotSmallModelTester(self ) a = ConfigTester(self ,config_class=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__lowerCamelCase ) @require_tokenizers @require_tf class lowerCamelCase_ ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ = [ 'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ' ' i\'m going to throw up.\nand why is that?' 
] SCREAMING_SNAKE_CASE_ = 'facebook/blenderbot_small-90M' @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = self.tokenizer(self.src_text ,return_tensors='''tf''' ) a = self.model.generate( model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=__lowerCamelCase ,) a = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=__lowerCamelCase )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> int: """simple docstring""" a = '''''' for i in table: res += inp[i - 1] return res def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int: """simple docstring""" return data[1:] + data[0] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]: """simple docstring""" a = '''''' for i in range(len(snake_case_ ) ): if a[i] == b[i]: res += "0" else: res += "1" return res def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Dict: """simple docstring""" a = int('''0b''' + data[0] + data[-1], 2 ) a = int('''0b''' + data[1:3], 2 ) return bin(s[row][col] )[2:] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Optional[int]: """simple docstring""" a = message[:4] a = message[4:] a = apply_table(snake_case_, snake_case_ ) a = xor(snake_case_, snake_case_ ) a = apply_sbox(snake_case_, temp[:4] ) # noqa: E741 a = apply_sbox(snake_case_, temp[4:] ) a = '''0''' * (2 - len(snake_case_ )) + l # noqa: E741 a = '''0''' * (2 - len(snake_case_ )) + r a = apply_table(l + r, snake_case_ ) a = xor(snake_case_, snake_case_ ) return temp + right if __name__ == "__main__": UpperCamelCase__ : int = input("""Enter 10 bit key: """) UpperCamelCase__ : Union[str, Any] = input("""Enter 8 bit message: """) UpperCamelCase__ : Dict = [6, 3, 7, 4, 8, 5, 10, 9] UpperCamelCase__ : Union[str, Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6] UpperCamelCase__ : Optional[int] = [2, 4, 3, 1] UpperCamelCase__ : List[Any] = [2, 6, 3, 1, 4, 8, 5, 7] UpperCamelCase__ : str = [4, 1, 3, 5, 7, 2, 8, 6] UpperCamelCase__ : List[Any] = [4, 1, 2, 3, 2, 3, 4, 1] UpperCamelCase__ : int = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]] UpperCamelCase__ : Dict = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]] # key generation UpperCamelCase__ : Optional[Any] = apply_table(key, paa_table) UpperCamelCase__ : str = temp[:5] UpperCamelCase__ : List[Any] = temp[5:] UpperCamelCase__ : Dict = left_shift(left) UpperCamelCase__ : Any = left_shift(right) UpperCamelCase__ : Optional[Any] = apply_table(left + right, pa_table) UpperCamelCase__ : List[str] = left_shift(left) UpperCamelCase__ : int = left_shift(right) UpperCamelCase__ : List[str] = left_shift(left) UpperCamelCase__ : Dict = left_shift(right) UpperCamelCase__ : List[str] = apply_table(left + right, pa_table) # encryption UpperCamelCase__ : Tuple = apply_table(message, IP) UpperCamelCase__ : Optional[Any] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Optional[int] = temp[4:] + temp[:4] UpperCamelCase__ : Any = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Tuple = apply_table(temp, IP_inv) print("""Cipher text is:""", CT) # decryption UpperCamelCase__ : Union[str, Any] = apply_table(CT, IP) UpperCamelCase__ : List[str] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Optional[Any] = temp[4:] + temp[:4] UpperCamelCase__ : Optional[int] = function(expansion, sa, sa, keya, temp) UpperCamelCase__ : Any = apply_table(temp, IP_inv) print("""Plain text after decypting is:""", PT)
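# Example session for the interactive S-DES script above (the 10-bit key and 8-bit
# message are a common textbook test vector; the exact ciphertext printed depends on
# the key schedule and permutation tables defined here):
#   Enter 10 bit key: 1010000010
#   Enter 8 bit message: 11010111
# The script prints the cipher text after the two Feistel rounds and then decrypts it
# back to the original plaintext.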
import argparse from collections import defaultdict import yaml UpperCamelCase__ : Tuple = """docs/source/en/_toctree.yml""" def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]: """simple docstring""" a = defaultdict(snake_case_ ) for doc in model_doc: counts[doc["local"]] += 1 a = [key for key, value in counts.items() if value > 1] a = [] for duplicate_key in duplicates: a = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} ) if len(snake_case_ ) > 1: raise ValueError( f"""{duplicate_key} is present several times in the documentation table of content at """ '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] ) # Sort return sorted(snake_case_, key=lambda snake_case_ : s["title"].lower() ) def SCREAMING_SNAKE_CASE__ ( snake_case_=False ) -> List[str]: """simple docstring""" with open(snake_case_, encoding='''utf-8''' ) as f: a = yaml.safe_load(f.read() ) # Get to the API doc a = 0 while content[api_idx]["title"] != "API": api_idx += 1 a = content[api_idx]['''sections'''] # Then to the model doc a = 0 while api_doc[model_idx]["title"] != "Models": model_idx += 1 a = api_doc[model_idx]['''sections'''] a = [(idx, section) for idx, section in enumerate(snake_case_ ) if '''sections''' in section] a = False for idx, modality_doc in modalities_docs: a = modality_doc['''sections'''] a = clean_model_doc_toc(snake_case_ ) if old_modality_doc != new_modality_doc: a = True if overwrite: a = new_modality_doc if diff: if overwrite: a = model_doc a = api_doc with open(snake_case_, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(snake_case_, allow_unicode=snake_case_ ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": UpperCamelCase__ : List[Any] = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") UpperCamelCase__ : int = parser.parse_args() check_model_doc(args.fix_and_overwrite)
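# Example run (the script path is hypothetical; per the error message above, this check
# is wired into `make style` in the transformers repo):
#   python check_doc_toc.py                      # raises if the model toc is unsorted
#   python check_doc_toc.py --fix_and_overwrite  # rewrites docs/source/en/_toctree.yml in place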
import unittest from transformers import AutoTokenizer, is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow if is_flax_available(): import jax.numpy as jnp from transformers import FlaxXLMRobertaModel @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase_ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' ) a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' ) a = '''The dog is cute and lives in the garden house''' a = jnp.array([tokenizer.encode(__lowerCamelCase )] ) a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim a = jnp.array( [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] ) a = model(__lowerCamelCase )['''last_hidden_state'''] self.assertEqual(output.shape ,__lowerCamelCase ) # compare the actual values for a slice of last dim self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = (DDPMScheduler,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,**__lowerCamelCase : Tuple ): '''simple docstring''' a = { '''num_train_timesteps''': 10_00, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**__lowerCamelCase ) return config def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=__lowerCamelCase ,beta_end=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' self.check_over_configs(thresholding=__lowerCamelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__lowerCamelCase ,prediction_type=__lowerCamelCase ,sample_max_value=__lowerCamelCase ,) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config() a = scheduler_class(**__lowerCamelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config() a = scheduler_class(**__lowerCamelCase ) a = len(__lowerCamelCase ) a = self.dummy_model() a = self.dummy_sample_deter a = torch.manual_seed(0 ) for t in reversed(range(__lowerCamelCase ) ): # 1. predict noise residual a = model(__lowerCamelCase ,__lowerCamelCase ) # 2. 
predict previous mean of sample x_t-1 a = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,generator=__lowerCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance a = pred_prev_sample a = torch.sum(torch.abs(__lowerCamelCase ) ) a = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 258.9_606 ) < 1e-2 assert abs(result_mean.item() - 0.3_372 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config(prediction_type='''v_prediction''' ) a = scheduler_class(**__lowerCamelCase ) a = len(__lowerCamelCase ) a = self.dummy_model() a = self.dummy_sample_deter a = torch.manual_seed(0 ) for t in reversed(range(__lowerCamelCase ) ): # 1. predict noise residual a = model(__lowerCamelCase ,__lowerCamelCase ) # 2. predict previous mean of sample x_t-1 a = scheduler.step(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,generator=__lowerCamelCase ).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance a = pred_prev_sample a = torch.sum(torch.abs(__lowerCamelCase ) ) a = torch.mean(torch.abs(__lowerCamelCase ) ) assert abs(result_sum.item() - 202.0_296 ) < 1e-2 assert abs(result_mean.item() - 0.2_631 ) < 1e-3 def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config() a = scheduler_class(**__lowerCamelCase ) a = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__lowerCamelCase ) a = scheduler.timesteps for i, timestep in enumerate(__lowerCamelCase ): if i == len(__lowerCamelCase ) - 1: a = -1 else: a = timesteps[i + 1] a = scheduler.previous_timestep(__lowerCamelCase ) a = prev_t.item() self.assertEqual(__lowerCamelCase ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config() a = scheduler_class(**__lowerCamelCase ) a = [1_00, 87, 50, 51, 0] with self.assertRaises(__lowerCamelCase ,msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config() a = scheduler_class(**__lowerCamelCase ) a = [1_00, 87, 50, 1, 0] a = len(__lowerCamelCase ) with self.assertRaises(__lowerCamelCase ,msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=__lowerCamelCase ,timesteps=__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = self.scheduler_classes[0] a = self.get_scheduler_config() a = scheduler_class(**__lowerCamelCase ) a = [scheduler.config.num_train_timesteps] with self.assertRaises( __lowerCamelCase ,msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' ,): scheduler.set_timesteps(timesteps=__lowerCamelCase )
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Create train/validation `DataLoader`s for GLUE MRPC with a bert-base-cased tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
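# Editor's note: a minimal, self-contained sketch (not part of the original script) of the
# behavior `find_executable_batch_size` provides. The decorator calls the wrapped function
# with `starting_batch_size`; whenever the body raises a recognized out-of-memory error it
# halves the batch size and retries until the body completes. The `> 16` threshold below is
# a hypothetical stand-in for "does not fit in memory".
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def run_step(batch_size):
    print(f"trying batch size {batch_size}")
    if batch_size > 16:
        # accelerate's OOM check matches this RuntimeError message and triggers a retry
        raise RuntimeError("CUDA out of memory.")
    return batch_size


assert run_step() == 16  # called with no arguments; the decorator supplies batch_size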
330
0
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import PIL

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)


@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the DeepFloyd IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
353
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCamelCase__ : Optional[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(snake_case_, snake_case_ ) if weight_type is not None: a = getattr(snake_case_, snake_case_ ).shape else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue a = True if "*" in mapped_key: a = name.split(snake_case_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', snake_case_ ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]: """simple docstring""" if config_path is not None: a = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: a = UniSpeechSatConfig() a = '''''' if is_finetuned: a = 
UniSpeechSatForCTC(snake_case_ ) else: a = UniSpeechSatForPreTraining(snake_case_ ) a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a = model[0].eval() recursively_load_weights(snake_case_, snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase__ : int = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
330
0
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase_ ( a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ProphetNetTokenizer SCREAMING_SNAKE_CASE_ = False def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' super().setUp() a = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] a = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : Any ): '''simple docstring''' a = '''UNwant\u00E9d,running''' a = '''unwanted, running''' return input_text, output_text def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = self.tokenizer_class(self.vocab_file ) a = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(__lowerCamelCase ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) ,[9, 6, 7, 12, 10, 11] ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' a = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] ) def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ,strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''h\u00E9llo'''] ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ,strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ,strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ,strip_accents=__lowerCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = BasicTokenizer(do_lower_case=__lowerCamelCase ,never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] a = {} for i, token in enumerate(__lowerCamelCase ): a = i a = WordpieceTokenizer(vocab=__lowerCamelCase ,unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) ,[] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) ,['''[UNK]''', '''runn''', '''##ing'''] ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] a = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02] a = tokenizer(__lowerCamelCase ,padding=__lowerCamelCase ,return_tensors='''pt''' ) self.assertIsInstance(__lowerCamelCase ,__lowerCamelCase ) a = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) self.assertEqual((2, 9) ,batch.input_ids.shape ) self.assertEqual((2, 9) ,batch.attention_mask.shape ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' ) a = tokenizer.encode('''sequence builders''' ,add_special_tokens=__lowerCamelCase ) a = 
tokenizer.encode('''multi-sequence build''' ,add_special_tokens=__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ) a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase ,__lowerCamelCase ) assert encoded_sentence == text + [1_02] assert encoded_pair == text + [1_02] + text_a + [1_02]
354
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


# Used by list_metrics
@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))]
)
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
330
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> float: """simple docstring""" if mass < 0: raise ValueError('''The mass of a body cannot be negative''' ) return 0.5 * mass * abs(snake_case_ ) * abs(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
355
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
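# Editor's note: typical downstream usage of this configuration class (an illustrative
# sketch, not part of the original file): build a randomly initialized model from default
# luke-base style hyper-parameters.
from transformers import LukeConfig, LukeModel

configuration = LukeConfig()
model = LukeModel(configuration)  # use LukeModel.from_pretrained(...) for trained weights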
330
0
from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE_ = 'Pix2StructImageProcessor' SCREAMING_SNAKE_CASE_ = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self : Tuple ,__lowerCamelCase : Any ,__lowerCamelCase : Dict ): '''simple docstring''' a = False super().__init__(__lowerCamelCase ,__lowerCamelCase ) def __call__( self : Any ,__lowerCamelCase : str=None ,__lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : Union[bool, str, PaddingStrategy] = False ,__lowerCamelCase : Union[bool, str, TruncationStrategy] = None ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : Optional[int] = 20_48 ,__lowerCamelCase : int = 0 ,__lowerCamelCase : Optional[int] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,**__lowerCamelCase : Union[str, Any] ,): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: a = self.tokenizer a = self.tokenizer( text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values a = self.image_processor( __lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,**__lowerCamelCase ) else: # add pixel_values and bbox a = self.image_processor( __lowerCamelCase ,return_tensors=__lowerCamelCase ,max_patches=__lowerCamelCase ,header_text=__lowerCamelCase ,**__lowerCamelCase ) if text is not None and not self.image_processor.is_vqa: a = self.tokenizer( text=__lowerCamelCase ,add_special_tokens=__lowerCamelCase ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,max_length=__lowerCamelCase ,stride=__lowerCamelCase ,pad_to_multiple_of=__lowerCamelCase ,return_attention_mask=__lowerCamelCase ,return_overflowing_tokens=__lowerCamelCase ,return_special_tokens_mask=__lowerCamelCase ,return_offsets_mapping=__lowerCamelCase ,return_token_type_ids=__lowerCamelCase ,return_length=__lowerCamelCase ,verbose=__lowerCamelCase ,return_tensors=__lowerCamelCase ,**__lowerCamelCase ,) if "attention_mask" in text_encoding: a = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: a = text_encoding.pop('''input_ids''' ) else: a = None if text_encoding is not None: encoding_image_processor.update(__lowerCamelCase ) return encoding_image_processor def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ,*__lowerCamelCase : List[str] 
,**__lowerCamelCase : Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,*__lowerCamelCase : List[Any] ,**__lowerCamelCase : Any ): '''simple docstring''' return self.tokenizer.decode(*__lowerCamelCase ,**__lowerCamelCase ) @property def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = self.tokenizer.model_input_names a = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
356
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
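# Editor's note: a quick, illustrative shape check of the sliding-window construction above
# (synthetic data, not from the original script). With look_back=10 and forward_days=5, each
# sample pairs a window of 10 consecutive values with the 5 values that follow it.
import numpy as np

series = np.arange(100, dtype=float).reshape(-1, 1)
look_back, forward_days = 10, 5
n_windows = len(series) - forward_days - look_back + 1  # 100 - 5 - 10 + 1 = 86
xs = np.array([series[i : i + look_back] for i in range(n_windows)])
ys = np.array([series[i + look_back : i + look_back + forward_days].ravel() for i in range(n_windows)])
assert xs.shape == (86, 10, 1) and ys.shape == (86, 5)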
330
0
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
357
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    # The lock file name is hashed/shortened so it stays a valid path.
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
330
0
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCamelCase_ ( a_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ = BarthezTokenizer SCREAMING_SNAKE_CASE_ = BarthezTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' super().setUp() a = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ,legacy_format=__lowerCamelCase ) a = tokenizer def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = '''<pad>''' a = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) ,__lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'''<s>''' ) self.assertEqual(vocab_keys[1] ,'''<pad>''' ) self.assertEqual(vocab_keys[-1] ,'''<mask>''' ) self.assertEqual(len(__lowerCamelCase ) ,10_11_22 ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,10_11_22 ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' a = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] a = [0, 57, 30_18, 7_03_07, 91, 2] a = self.tokenizer( __lowerCamelCase ,max_length=len(__lowerCamelCase ) ,padding=__lowerCamelCase ,truncation=__lowerCamelCase ,return_tensors='''pt''' ) self.assertIsInstance(__lowerCamelCase ,__lowerCamelCase ) self.assertEqual((2, 6) ,batch.input_ids.shape ) self.assertEqual((2, 6) ,batch.attention_mask.shape ) a = batch.input_ids.tolist()[0] self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' if not self.test_rust_tokenizer: return a = self.get_tokenizer() a = self.get_rust_tokenizer() a = '''I was born in 92000, and this is falsé.''' a = tokenizer.tokenize(__lowerCamelCase ) a = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) a = tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) a = rust_tokenizer.encode(__lowerCamelCase ,add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) a = self.get_rust_tokenizer() a = tokenizer.encode(__lowerCamelCase ) a = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase ,__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = {'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 
7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. a = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase ,model_name='''moussaKam/mbarthez''' ,revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' ,sequences=__lowerCamelCase ,)
358
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
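# Editor's note (illustrative arithmetic, not from the original file): with the defaults
# above, an image is split into (224 // 16) ** 2 = 196 patches, and mask_ratio=0.75 leaves
# the encoder only 49 visible patches, which is where MAE pretraining gets its speed.
config = ViTMAEConfig()
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches, int(num_patches * (1 - config.mask_ratio)))  # 196 49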
330
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from ...utils.dataclasses import (
    ComputeEnvironment,
    DistributedType,
    DynamoBackend,
    PrecisionType,
    SageMakerDistributedType,
)
from ..menu import BulletMenu


DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
359
def stooge_sort(arr):
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
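# Editor's note (not from the original): stooge sort recurses on two thirds of the range
# three times, so its running time satisfies T(n) = 3 * T(2n / 3) + O(1), which solves to
# O(n ** (log 3 / log 1.5)).
from math import log

print(log(3) / log(1.5))  # ~= 2.7095, i.e. noticeably worse than bubble sort's O(n**2)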
330
0
def perfect_cube(n: int) -> bool:
    """Return True if `n` is a perfect cube, using a floating-point cube root."""
    val = n ** (1 / 3)
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
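# Editor's note: `n ** (1 / 3)` goes through floating point, which can misclassify large
# perfect cubes (the computed root comes back as e.g. 342.99999999999994). A sturdier
# integer variant (a sketch, not from the original file) rounds the approximate root and
# verifies exactly, checking the neighbors to absorb any off-by-one from rounding:
def perfect_cube_exact(n: int) -> bool:
    m = abs(n)
    root = round(m ** (1 / 3))
    return any((root + d) ** 3 == m for d in (-1, 0, 1))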
360
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCamelCase__ : Union[str, Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCamelCase__ : str = { """jukebox""": 512, } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = version a = max_n_lyric_tokens a = n_genres with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
if len(self.lyrics_encoder ) == 79: a = oov.replace(r'''\-\'''' ,r'''\-+\'''' ) a = regex.compile(__lowerCamelCase ) a = {v: k for k, v in self.artists_encoder.items()} a = {v: k for k, v in self.genres_encoder.items()} a = {v: k for k, v in self.lyrics_encoder.items()} @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ): '''simple docstring''' a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists] for genres in range(len(__lowerCamelCase ) ): a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]] a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] )) a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []] return artists_id, list_genres, lyric_ids def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str] ): '''simple docstring''' return list(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' a , a , a = self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = self._tokenize(__lowerCamelCase ) return artist, genre, lyrics def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ): '''simple docstring''' for idx in range(len(self.version ) ): if self.version[idx] == "v3": a = artists[idx].lower() a = [genres[idx].lower()] else: a = self._normalize(artists[idx] ) + '''.v2''' a = [ self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' ) ] # split is for the full dictionary with combined genres if self.version[0] == "v2": a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' ) a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n''' a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )} a = 0 a = len(__lowerCamelCase ) + 1 a = self.vocab a = {v: k for k, v in self.vocab.items()} a = '''''' else: a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' ) a = self._run_strip_accents(__lowerCamelCase ) a = lyrics.replace('''\\''' ,'''\n''' ) a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], [] return artists, genres, lyrics def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ): '''simple docstring''' a = unicodedata.normalize('''NFD''' ,__lowerCamelCase ) a = [] for char in text: a = unicodedata.category(__lowerCamelCase ) if cat == "Mn": continue output.append(__lowerCamelCase ) return "".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ): '''simple docstring''' a = ( [chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )] + [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )] + ['''.'''] ) a = frozenset(__lowerCamelCase ) a = re.compile(r'''_+''' ) a = ''''''.join([c if c in accepted else '''_''' for c 
in text.lower()] ) a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' ) return text def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ): '''simple docstring''' return " ".join(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : bool = False ): '''simple docstring''' if not isinstance(__lowerCamelCase ,__lowerCamelCase ): a = TensorType(__lowerCamelCase ) # Get a function reference for the correct framework if tensor_type == TensorType.TENSORFLOW: if not is_tf_available(): raise ImportError( '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' ) import tensorflow as tf a = tf.constant a = tf.is_tensor elif tensor_type == TensorType.PYTORCH: if not is_torch_available(): raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' ) import torch a = torch.tensor a = torch.is_tensor elif tensor_type == TensorType.JAX: if not is_flax_available(): raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' ) import jax.numpy as jnp # noqa: F811 a = jnp.array a = _is_jax else: a = np.asarray a = _is_numpy # Do the tensor conversion in batch try: if prepend_batch_axis: a = [inputs] if not is_tensor(__lowerCamelCase ): a = as_tensor(__lowerCamelCase ) except: # noqa E722 raise ValueError( '''Unable to create tensor, you should probably activate truncation and/or padding ''' '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' ) return inputs def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ): '''simple docstring''' a = [0, 0, 0] a = [artist] * len(self.version ) a = [genres] * len(self.version ) a , a , a = self.tokenize(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) a = [-INFINITY] * len(full_tokens[-1] ) a = [ self.convert_to_tensors( [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase ) for i in range(len(self.version ) ) ] return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} ) def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) ) a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] ) with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f: f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) ) return (artists_file, genres_file, lyrics_file) def SCREAMING_SNAKE_CASE_ ( self : 
Any ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : str ): '''simple docstring''' a = self.artists_decoder.get(__lowerCamelCase ) a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index] a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index] return artist, genres, lyrics
330
0
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` via breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest-path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
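# Editor's note (a variant sketch, not from the original): `list.pop(0)` is O(n), so on
# large graphs a `collections.deque` keeps the queue operations O(1) while leaving the
# traversal logic of `bfs_shortest_path` unchanged.
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal) -> list:
    explored = set()
    queue = deque([[start]])
    if start == goal:
        return [start]
    while queue:
        path = queue.popleft()  # O(1), unlike list.pop(0)
        node = path[-1]
        if node not in explored:
            for neighbour in graph[node]:
                new_path = path + [neighbour]
                if neighbour == goal:
                    return new_path
                queue.append(new_path)
            explored.add(node)
    return []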
361
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build
# borrowed from a test
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))

    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
330
0
class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        # Note: the original guarded this with `if self.val:`, which misbehaves for a
        # falsy root value such as 0; the plain comparison below handles all values.
        if val < self.val:
            if self.left is None:
                self.left = Node(val)
            else:
                self.left.insert(val)
        elif val > self.val:
            if self.right is None:
                self.right = Node(val)
            else:
                self.right.insert(val)
        # equal values are not re-inserted, so duplicates are collapsed


def inorder(root, res):
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build a binary search tree from the input.
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
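# Editor's note (a caveat, not from the original): because equal keys are never inserted
# twice, duplicates are collapsed, so this is not a general-purpose sort for inputs with
# repeated values.
print(tree_sort([3, 1, 3, 2, 3]))  # -> [1, 2, 3]; the repeated 3s collapse to one entry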
362
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) UpperCamelCase__ : Optional[Any] = """bert-base-cased""" UpperCamelCase__ : int = """fp16""" UpperCamelCase__ : str = """bf16""" UpperCamelCase__ : List[Any] = [FPaa, BFaa] @require_fsdp @require_cuda class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' super().setUp() a = dict( ACCELERATE_USE_FSDP='''true''' ,MASTER_ADDR='''localhost''' ,MASTER_PORT='''10999''' ,RANK='''0''' ,LOCAL_RANK='''0''' ,WORLD_SIZE='''1''' ,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = F"""{i + 1}""" a = strategy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = prefetch_policy with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCamelCase ): a = self.dist_env.copy() a = state_dict_type with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' a = AutoModel.from_pretrained(__lowerCamelCase ) for policy in FSDP_AUTO_WRAP_POLICY: a = self.dist_env.copy() a = policy if policy == "TRANSFORMER_BASED_WRAP": a = '''BertLayer''' elif policy == "SIZE_BASED_WRAP": a = '''2000''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) a = self.dist_env.copy() a = '''TRANSFORMER_BASED_WRAP''' a = '''T5Layer''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCamelCase ) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertTrue('''Could not find the transformer layer class to wrap in 
the model.''' in str(cm.exception ) ) a = self.dist_env.copy() a = '''SIZE_BASED_WRAP''' a = '''0''' with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: a = self.dist_env.copy() a = mp_dtype with mockenv_context(**__lowerCamelCase ): a = Accelerator() if mp_dtype == "fp16": a = torch.floataa elif mp_dtype == "bf16": a = torch.bfloataa a = MixedPrecision(param_dtype=__lowerCamelCase ,reduce_dtype=__lowerCamelCase ,buffer_dtype=__lowerCamelCase ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCamelCase ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler ,__lowerCamelCase ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: a = self.dist_env.copy() a = str(__lowerCamelCase ).lower() with mockenv_context(**__lowerCamelCase ): a = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCamelCase ) ) @require_fsdp @require_multi_gpu @slow class lowerCamelCase_ ( a_ ): def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' super().setUp() a = 0.82 a = [ '''fsdp_shard_grad_op_transformer_based_wrap''', '''fsdp_full_shard_transformer_based_wrap''', ] a = { '''multi_gpu_fp16''': 32_00, '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00, '''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } a = 1_60 a = 1_60 a = inspect.getfile(accelerate.test_utils ) a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_performance.py''' ) a = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp'''] for config in self.performance_configs: a = cmd.copy() for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in config: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append('''--mixed_precision=no''' ) else: cmd_config.append('''--mixed_precision=fp16''' ) if "cpu_offload" in config: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''', '''--mixed_precision=fp16''', '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''', ] for i, strategy in enumerate(__lowerCamelCase ): a = cmd.copy() cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue a = len(__lowerCamelCase ) for state_dict_type in FSDP_STATE_DICT_TYPE: a = cmd_config[:state_dict_config_index] cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", '''--partial_train_epoch=1''', ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) a = cmd_config[:-1] a = os.path.join(self.tmpdir ,'''epoch_0''' ) cmd_config.extend( [ F"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' ) a = [ '''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): a = cmd.copy() if "fp16" in spec: cmd_config.extend(['''--mixed_precision=fp16'''] ) else: cmd_config.extend(['''--mixed_precision=no'''] ) if "multi_gpu" in spec: continue else: cmd_config.extend(['''--use_fsdp'''] ) for i, strategy in enumerate(__lowerCamelCase ): if strategy.lower() in spec: cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append('''--fsdp_offload_params=True''' ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: 
cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' ) elif policy == "SIZE_BASED_WRAP": cmd_config.append('''--fsdp_min_num_params=2000''' ) cmd_config.extend( [ self.test_file_path, F"""--output_dir={self.tmpdir}""", F"""--peak_memory_upper_bound={peak_mem_upper_bound}""", F"""--n_train={self.n_train}""", F"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ : List[str] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ : Dict = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations import os from collections.abc import Mapping UpperCamelCase__ : Any = tuple[int, int] class lowerCamelCase_ : def __init__( self : Optional[Any] ,__lowerCamelCase : set[int] ,__lowerCamelCase : Mapping[EdgeT, int] ): '''simple docstring''' a = vertices a = { (min(__lowerCamelCase ), max(__lowerCamelCase )): weight for edge, weight in edges.items() } def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : EdgeT ,__lowerCamelCase : int ): '''simple docstring''' self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) a = weight def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = Graph({min(self.vertices )} ,{} ) a = 42 a = 42 a = 42 a = 42 while len(subgraph.vertices ) < len(self.vertices ): a = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: a = edge a = weight subgraph.add_edge(__lowerCamelCase ,__lowerCamelCase ) return subgraph def SCREAMING_SNAKE_CASE__ ( snake_case_ = "p107_network.txt" ) -> int: """simple docstring""" a = os.path.abspath(os.path.dirname(snake_case_ ) ) a = os.path.join(snake_case_, snake_case_ ) a = {} a = 42 a = 42 a = 42 with open(snake_case_ ) as f: a = f.read().strip().split('''\n''' ) a = [line.split(''',''' ) for line in data] for edgea in range(1, len(snake_case_ ) ): for edgea in range(snake_case_ ): if adjaceny_matrix[edgea][edgea] != "-": a = int(adjaceny_matrix[edgea][edgea] ) a = Graph(set(range(len(snake_case_ ) ) ), snake_case_ ) a = graph.prims_algorithm() a = sum(graph.edges.values() ) a = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(F"{solution() = }")
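# Self-contained micro-version of the Prim's-algorithm loop used above, with
# readable names (a hypothetical helper, not the class from the file):
# repeatedly take the lightest edge with exactly one endpoint inside the
# growing tree, until every vertex is covered.
def prim_total_weight(vertices, edges):
    in_tree = {min(vertices)}
    total = 0
    while len(in_tree) < len(vertices):
        # lightest edge crossing the cut between tree and non-tree vertices
        (u, v), w = min(
            ((e, wt) for e, wt in edges.items() if (e[0] in in_tree) ^ (e[1] in in_tree)),
            key=lambda item: item[1],
        )
        in_tree.update((u, v))
        total += w
    return total

# A triangle with weights 1, 2, 3: the MST keeps the two lightest edges.
assert prim_total_weight({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3}) == 1 + 2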
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Any: """simple docstring""" a = [] for part_id in partition_order: a = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect() for row_idx, row in enumerate(snake_case_ ): expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE__ ( ) -> List[str]: """simple docstring""" a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() a = spark.range(1_0_0 ).repartition(1 ) a = Spark(snake_case_ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=1_6 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 5_0 @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: """simple docstring""" a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() a = spark.range(1_0 ).repartition(2 ) a = [1, 0] a = _generate_iterable_examples(snake_case_, snake_case_ ) # Reverse the partitions. a = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_, snake_case_ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): a , a = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]: """simple docstring""" a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() a = spark.range(1_0 ).repartition(1 ) a = SparkExamplesIterable(snake_case_ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(snake_case_ ): assert row_id == f"""0_{i}""" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE__ ( ) -> Any: """simple docstring""" a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() a = spark.range(3_0 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch('''numpy.random.Generator''' ) as generator_mock: a = lambda snake_case_ : x.reverse() a = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_, [2, 1, 0] ) a = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(snake_case_ ): a , a = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE__ ( ) -> str: """simple docstring""" a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() a = spark.range(2_0 ).repartition(4 ) # Partitions 0 and 2 a = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0, num_workers=2 ) assert shard_it_a.n_shards == 2 a = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_, [0, 2] ) for i, (row_id, row_dict) in enumerate(snake_case_ ): a , a = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 a = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1, num_workers=2 ) assert shard_it_a.n_shards == 2 a = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_, [1, 3] ) for i, (row_id, row_dict) in enumerate(snake_case_ ): a , a = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]: """simple docstring""" a = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate() a = spark.range(1_0_0 ).repartition(1 ) a = Spark(snake_case_ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_0_0
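# Pure-Python illustration of the row-id convention the tests above assert on:
# example ids are "<partition>_<row_index_within_partition>", so one partition
# holding rows 0..3 yields ids "0_0" .. "0_3" (no Spark session needed).
rows = [{"id": i} for i in range(4)]
examples = [(f"0_{idx}", row) for idx, row in enumerate(rows)]
assert examples[2] == ("0_2", {"id": 2})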
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig UpperCamelCase__ : List[Any] = logging.get_logger(__name__) # General docstring UpperCamelCase__ : List[Any] = """RegNetConfig""" # Base docstring UpperCamelCase__ : Dict = """facebook/regnet-y-040""" UpperCamelCase__ : int = [1, 1_088, 7, 7] # Image classification docstring UpperCamelCase__ : Optional[Any] = """facebook/regnet-y-040""" UpperCamelCase__ : Dict = """tabby, tabby cat""" UpperCamelCase__ : Dict = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[str] ,__lowerCamelCase : int ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : Optional[str] = "relu" ,**__lowerCamelCase : str ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb a = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=__lowerCamelCase ,strides=__lowerCamelCase ,padding='''VALID''' ,groups=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ,) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) a = ACTaFN[activation] if activation is not None else tf.identity def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : List[str] ): '''simple docstring''' a = self.convolution(self.padding(__lowerCamelCase ) ) a = self.normalization(__lowerCamelCase ) a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Any ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : List[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config.num_channels a = TFRegNetConvLayer( out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name='''embedder''' ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = shape_list(__lowerCamelCase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) a = tf.transpose(__lowerCamelCase ,perm=(0, 2, 3, 1) ) a = self.embedder(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : str ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Tuple ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.ConvaD( filters=__lowerCamelCase ,kernel_size=1 ,strides=__lowerCamelCase ,use_bias=__lowerCamelCase ,name='''convolution''' ) a = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name='''normalization''' ) def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ): '''simple docstring''' return self.normalization(self.convolution(__lowerCamelCase ) ,training=__lowerCamelCase ) class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : List[Any] ,__lowerCamelCase : int ,__lowerCamelCase : int ,**__lowerCamelCase : str ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) a = [ tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''relu''' ,name='''attention.0''' ), tf.keras.layers.ConvaD(filters=__lowerCamelCase ,kernel_size=1 ,activation='''sigmoid''' ,name='''attention.2''' ), ] def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : Optional[Any] ): '''simple docstring''' a = self.pooler(__lowerCamelCase ) for layer_module in self.attention: a = layer_module(__lowerCamelCase ) a = hidden_state * pooled return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : Dict ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.2''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Union[str, Any] ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Dict ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 1 ,**__lowerCamelCase : List[str] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = in_channels != out_channels or stride != 1 a = max(1 ,out_channels // config.groups_width ) a = ( TFRegNetShortCut(__lowerCamelCase ,stride=__lowerCamelCase ,name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' ,name='''shortcut''' ) ) a = [ TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=config.hidden_act ,name='''layer.0''' ), TFRegNetConvLayer( __lowerCamelCase ,stride=__lowerCamelCase ,groups=__lowerCamelCase ,activation=config.hidden_act ,name='''layer.1''' ), TFRegNetSELayer(__lowerCamelCase ,reduced_channels=int(round(in_channels / 4 ) ) ,name='''layer.2''' ), TFRegNetConvLayer(__lowerCamelCase ,kernel_size=1 ,activation=__lowerCamelCase ,name='''layer.3''' ), ] a = ACTaFN[config.hidden_act] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : str ): '''simple docstring''' a = hidden_state for layer_module in self.layers: a = layer_module(__lowerCamelCase ) a = self.shortcut(__lowerCamelCase ) hidden_state += residual a = self.activation(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,__lowerCamelCase : int ,__lowerCamelCase : int ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 2 ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer a = [ # downsampling is done in the first layer with stride of 2 layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,stride=__lowerCamelCase ,name='''layers.0''' ), *[layer(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : int ): '''simple docstring''' for layer_module in self.layers: a = layer_module(__lowerCamelCase ) return hidden_state class lowerCamelCase_ ( tf.keras.layers.Layer ): def __init__( self : Union[str, Any] ,__lowerCamelCase : RegNetConfig ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( __lowerCamelCase ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name='''stages.0''' ,) ) a = zip(config.hidden_sizes ,config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in 
enumerate(zip(__lowerCamelCase ,config.depths[1:] ) ): self.stages.append(TFRegNetStage(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,depth=__lowerCamelCase ,name=F"""stages.{i+1}""" ) ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : bool = False ,__lowerCamelCase : bool = True ): '''simple docstring''' a = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: a = hidden_states + (hidden_state,) a = stage_module(__lowerCamelCase ) if output_hidden_states: a = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=__lowerCamelCase ,hidden_states=__lowerCamelCase ) @keras_serializable class lowerCamelCase_ ( tf.keras.layers.Layer ): SCREAMING_SNAKE_CASE_ = RegNetConfig def __init__( self : Dict ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = config a = TFRegNetEmbeddings(__lowerCamelCase ,name='''embedder''' ) a = TFRegNetEncoder(__lowerCamelCase ,name='''encoder''' ) a = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__lowerCamelCase ,name='''pooler''' ) @unpack_inputs def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : bool = False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.embedder(__lowerCamelCase ,training=__lowerCamelCase ) a = self.encoder( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = encoder_outputs[0] a = self.pooler(__lowerCamelCase ) # Change to NCHW output format have uniformity in the modules a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) a = tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: a = tuple([tf.transpose(__lowerCamelCase ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__lowerCamelCase ,pooler_output=__lowerCamelCase ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,) class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = RegNetConfig SCREAMING_SNAKE_CASE_ = 'regnet' SCREAMING_SNAKE_CASE_ = 'pixel_values' @property def SCREAMING_SNAKE_CASE_ ( self : str ): '''simple docstring''' return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )} UpperCamelCase__ : Union[str, Any] = R""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. 
""" UpperCamelCase__ : List[str] = R""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare RegNet model outputting raw features without any specific head on top.' , a_ , ) class lowerCamelCase_ ( a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : int ,**__lowerCamelCase : Union[str, Any] ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : tf.Tensor ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : Optional[bool] = None ,__lowerCamelCase : List[str]=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( pixel_values=__lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ,) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,) @add_start_docstrings( '\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , a_ , ) class lowerCamelCase_ ( a_ , a_ ): def __init__( self : Optional[int] ,__lowerCamelCase : RegNetConfig ,*__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' super().__init__(__lowerCamelCase ,*__lowerCamelCase ,**__lowerCamelCase ) a = config.num_labels a = TFRegNetMainLayer(__lowerCamelCase ,name='''regnet''' ) # classification head a = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels ,name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(__lowerCamelCase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=__lowerCamelCase ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : tf.Tensor = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict=False ,): '''simple docstring''' a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = return_dict if return_dict is not None else self.config.use_return_dict a = self.regnet( __lowerCamelCase ,output_hidden_states=__lowerCamelCase ,return_dict=__lowerCamelCase ,training=__lowerCamelCase ) a = outputs.pooler_output if return_dict else outputs[1] a = self.classifier[0](__lowerCamelCase ) a = self.classifier[1](__lowerCamelCase ) a = None if labels is None else self.hf_compute_loss(labels=__lowerCamelCase ,logits=__lowerCamelCase ) if not return_dict: a = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=__lowerCamelCase ,logits=__lowerCamelCase ,hidden_states=outputs.hidden_states )
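# Quick sketch of the layout juggling the model above performs: inputs arrive
# channels-first (NCHW) for parity with the PyTorch API, Keras Conv2D on CPU
# wants channels-last (NHWC), so tensors are transposed on the way in and the
# outputs transposed back.
import tensorflow as tf

x_nchw = tf.zeros((1, 3, 224, 224))               # (batch, channels, H, W)
x_nhwc = tf.transpose(x_nchw, perm=(0, 2, 3, 1))  # (batch, H, W, channels)
assert x_nhwc.shape == (1, 224, 224, 3)
x_back = tf.transpose(x_nhwc, perm=(0, 3, 1, 2))  # restore NCHW on output
assert x_back.shape == (1, 3, 224, 224)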
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]: """simple docstring""" if is_torch_version('''<''', '''2.0.0''' ) or not hasattr(snake_case_, '''_dynamo''' ): return False return isinstance(snake_case_, torch._dynamo.eval_frame.OptimizedModule ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = True ) -> Any: """simple docstring""" a = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) a = is_compiled_module(snake_case_ ) if is_compiled: a = model a = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(snake_case_, snake_case_ ): a = model.module if not keep_fpaa_wrapper: a = getattr(snake_case_, '''forward''' ) a = model.__dict__.pop('''_original_forward''', snake_case_ ) if original_forward is not None: while hasattr(snake_case_, '''__wrapped__''' ): a = forward.__wrapped__ if forward == original_forward: break a = forward if getattr(snake_case_, '''_converted_to_transformer_engine''', snake_case_ ): convert_model(snake_case_, to_transformer_engine=snake_case_ ) if is_compiled: a = model a = compiled_model return model def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]: """simple docstring""" PartialState().wait_for_everyone() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Tuple: """simple docstring""" if PartialState().distributed_type == DistributedType.TPU: xm.save(snake_case_, snake_case_ ) elif PartialState().local_process_index == 0: torch.save(snake_case_, snake_case_ ) @contextmanager def SCREAMING_SNAKE_CASE__ ( **snake_case_ ) -> Dict: """simple docstring""" for key, value in kwargs.items(): a = str(snake_case_ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple: """simple docstring""" if not hasattr(snake_case_, '''__qualname__''' ) and not hasattr(snake_case_, '''__name__''' ): a = getattr(snake_case_, '''__class__''', snake_case_ ) if hasattr(snake_case_, '''__qualname__''' ): return obj.__qualname__ if hasattr(snake_case_, '''__name__''' ): return obj.__name__ return str(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]: """simple docstring""" for key, value in source.items(): if isinstance(snake_case_, snake_case_ ): a = destination.setdefault(snake_case_, {} ) merge_dicts(snake_case_, snake_case_ ) else: a = value return destination def SCREAMING_SNAKE_CASE__ ( snake_case_ = None ) -> bool: """simple docstring""" if port is None: a = 2_9_5_0_0 with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s: return s.connect_ex(('''localhost''', port) ) == 0
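# Standalone sketch of the final helper above (named `is_port_in_use` in the
# original accelerate source; the dump mangled the function names): a
# successful `connect_ex` means something is already listening on the port,
# e.g. a live torch.distributed rendezvous on the default 29500.
import socket

def port_in_use(port=29500):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0

print(port_in_use(29500))  # normally False on an idle machine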
from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCamelCase__ : List[str] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = 'efficientformer' def __init__( self : Optional[int] ,__lowerCamelCase : List[int] = [3, 2, 6, 4] ,__lowerCamelCase : List[int] = [48, 96, 2_24, 4_48] ,__lowerCamelCase : List[bool] = [True, True, True, True] ,__lowerCamelCase : int = 4_48 ,__lowerCamelCase : int = 32 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : int = 7 ,__lowerCamelCase : int = 5 ,__lowerCamelCase : int = 8 ,__lowerCamelCase : int = 4 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 16 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 3 ,__lowerCamelCase : int = 2 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : float = 0.0 ,__lowerCamelCase : int = 1 ,__lowerCamelCase : bool = True ,__lowerCamelCase : bool = True ,__lowerCamelCase : float = 1e-5 ,__lowerCamelCase : str = "gelu" ,__lowerCamelCase : float = 0.02 ,__lowerCamelCase : float = 1e-12 ,__lowerCamelCase : int = 2_24 ,__lowerCamelCase : float = 1e-05 ,**__lowerCamelCase : Dict ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = hidden_act a = hidden_dropout_prob a = hidden_sizes a = num_hidden_layers a = num_attention_heads a = initializer_range a = layer_norm_eps a = patch_size a = num_channels a = depths a = mlp_expansion_ratio a = downsamples a = dim a = key_dim a = attention_ratio a = resolution a = pool_size a = downsample_patch_size a = downsample_stride a = downsample_pad a = drop_path_rate a = num_metaad_blocks a = distillation a = use_layer_scale a = layer_scale_init_value a = image_size a = batch_norm_eps
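# Hedged usage sketch: like any `PretrainedConfig` subclass, the configuration
# above can be built with defaults and round-tripped through a plain dict
# (assumes a transformers release that ships EfficientFormer, roughly >= 4.26).
from transformers import EfficientFormerConfig

config = EfficientFormerConfig()
assert config.model_type == "efficientformer"
restored = EfficientFormerConfig.from_dict(config.to_dict())
assert restored.hidden_sizes == config.hidden_sizes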
import numpy as np def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ = 1e-12, snake_case_ = 1_0_0, ) -> tuple[float, np.ndarray]: """simple docstring""" assert np.shape(snake_case_ )[0] == np.shape(snake_case_ )[1] # Ensure proper dimensionality. assert np.shape(snake_case_ )[0] == np.shape(snake_case_ )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(snake_case_ ) == np.iscomplexobj(snake_case_ ) a = np.iscomplexobj(snake_case_ ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(snake_case_, input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. a = False a = 0 a = 0 a = 1e12 while not convergence: # Multiple matrix by the vector. a = np.dot(snake_case_, snake_case_ ) # Normalize the resulting output vector. a = w / np.linalg.norm(snake_case_ ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) a = vector.conj().T if is_complex else vector.T a = np.dot(snake_case_, np.dot(snake_case_, snake_case_ ) ) # Check convergence. a = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: a = True a = lambda_ if is_complex: a = np.real(lambda_ ) return lambda_, vector def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" a = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] ) a = np.array([4_1, 4, 2_0] ) a = real_input_matrix.astype(np.complexaaa ) a = np.triu(1J * complex_input_matrix, 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T a = np.array([4_1, 4, 2_0] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": a = real_input_matrix a = real_vector elif problem_type == "complex": a = complex_input_matrix a = complex_vector # Our implementation. a , a = power_iteration(snake_case_, snake_case_ ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). a , a = np.linalg.eigh(snake_case_ ) # Last eigenvalue is the maximum one. a = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. a = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1e-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(snake_case_ ) - np.abs(snake_case_ ) ) <= 1e-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
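# Tiny worked instance of the power method above, inlined so it is fully
# self-contained: for diag(2, 1) the iterates align with e1 and the Rayleigh
# quotient converges to the dominant eigenvalue 2.
import numpy as np

m = np.array([[2.0, 0.0], [0.0, 1.0]])
v = np.array([1.0, 1.0])
for _ in range(50):
    v = m @ v
    v /= np.linalg.norm(v)
rayleigh = v @ (m @ v)   # v is already normalized, so no extra division
assert abs(rayleigh - 2.0) < 1e-6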
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCamelCase__ : Any = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCamelCase__ : Optional[Any] = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCamelCase__ : Optional[Any] = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : List[str] = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCamelCase__ : Optional[int] = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for tf_name, hf_name in patterns: a = k.replace(snake_case_, snake_case_ ) return k def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> BigBirdPegasusForConditionalGeneration: """simple docstring""" a = BigBirdPegasusConfig(**snake_case_ ) a = BigBirdPegasusForConditionalGeneration(snake_case_ ) a = torch_model.state_dict() a = {} # separating decoder weights a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = DECODER_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): a = [k.endswith(snake_case_ ) for ending in KEYS_TO_IGNORE] if any(snake_case_ ): continue a = REMAINING_PATTERNS a = rename_state_dict_key(snake_case_, snake_case_ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): a = v.T a = torch.from_numpy(snake_case_ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" a = mapping['''model.embed_positions.weight'''] a = mapping.pop('''model.embed_positions.weight''' ) a , a = torch_model.load_state_dict(snake_case_, strict=snake_case_ ) a = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict: """simple docstring""" a = tf.train.list_variables(snake_case_ ) a = {} a = ['''global_step'''] for name, shape in tqdm(snake_case_, desc='''converting tf checkpoint to dict''' ): a = any(pat in name for pat in ignore_name ) if skip_key: continue a = tf.train.load_variable(snake_case_, snake_case_ ) a = array return tf_weights def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> int: """simple docstring""" a = get_tf_weights_as_numpy(snake_case_ ) a = convert_bigbird_pegasus(snake_case_, snake_case_ ) torch_model.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : str = argparse.ArgumentParser() parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""") parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""") UpperCamelCase__ : int = parser.parse_args() UpperCamelCase__ : Tuple = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
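# Micro-example of the key-renaming step above: each (tf_pattern, hf_pattern)
# pair is applied as a plain, ordered `str.replace` pass over the weight name.
def rename_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("pegasus", "model")]
assert rename_key("pegasus/decoder/layer_0/kernel", demo_patterns) == "model.decoder.layers.0.weight"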
# This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES UpperCamelCase__ : Optional[Any] = """tiny-wmt19-en-ru""" # Build # borrowed from a test UpperCamelCase__ : Any = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""", """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""", ] UpperCamelCase__ : List[Any] = dict(zip(vocab, range(len(vocab)))) UpperCamelCase__ : Any = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""] with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase__ : Optional[Any] = Path(tmpdirname) UpperCamelCase__ : Tuple = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""] UpperCamelCase__ : int = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""] UpperCamelCase__ : Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""merges_file"""] with open(src_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, """w""") as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, """w""") as fp: fp.write("""\n""".join(merges)) UpperCamelCase__ : Dict = FSMTTokenizer( langs=["""en""", """ru"""], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) UpperCamelCase__ : Union[str, Any] = FSMTConfig( langs=["""ru""", """en"""], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) UpperCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(config) print(F"num of params {tiny_model.num_parameters()}") # Test UpperCamelCase__ : List[str] = tokenizer(["""Making tiny model"""], return_tensors="""pt""") UpperCamelCase__ : Tuple = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-ru
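# Hedged follow-up sketch: after the upload step in the comment above, the
# tiny checkpoint is consumed in tests exactly like a full-size model, just
# near-instantly (assumes the "stas/tiny-wmt19-en-ru" Hub repo is reachable).
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
out = model.generate(**tok(["lowest"], return_tensors="pt"), max_new_tokens=4)
print(tok.batch_decode(out))  # gibberish by design; only the plumbing matters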
import re def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> str: """simple docstring""" if len(re.findall('''[ATCG]''', snake_case_ ) ) != len(snake_case_ ): raise ValueError('''Invalid Strand''' ) return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
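# Worked example of the complement rule above: A<->T and C<->G, with the
# character order preserved (this is the complement, not the reverse
# complement).
assert "ATCG".translate(str.maketrans("ATCG", "TAGC")) == "TAGC"
assert "AAAACCCGGT".translate(str.maketrans("ATCG", "TAGC")) == "TTTTGGGCCA"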
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class lowerCamelCase_ ( a_ , a_ ): SCREAMING_SNAKE_CASE_ = 'pixel_values' SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = TimmBackboneConfig def __init__( self : Optional[Any] ,__lowerCamelCase : str ,**__lowerCamelCase : Any ): '''simple docstring''' requires_backends(self ,'''timm''' ) super().__init__(__lowerCamelCase ) a = config if config.backbone is None: raise ValueError('''backbone is not set in the config. Please set it to a timm model name.''' ) if config.backbone not in timm.list_models(): raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(__lowerCamelCase ,'''out_features''' ) and config.out_features is not None: raise ValueError('''out_features is not supported by TimmBackbone. Please use out_indices instead.''' ) a = getattr(__lowerCamelCase ,'''use_pretrained_backbone''' ,__lowerCamelCase ) if pretrained is None: raise ValueError('''use_pretrained_backbone is not set in the config. Please set it to True or False.''' ) # We just take the final layer by default. This matches the default for the transformers models. a = config.out_indices if getattr(__lowerCamelCase ,'''out_indices''' ,__lowerCamelCase ) is not None else (-1,) a = timm.create_model( config.backbone ,pretrained=__lowerCamelCase ,features_only=config.features_only ,in_chans=config.num_channels ,out_indices=__lowerCamelCase ,**__lowerCamelCase ,) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
a = self._backbone.return_layers a = {layer['''module''']: str(__lowerCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__lowerCamelCase ) @classmethod def SCREAMING_SNAKE_CASE_ ( cls : List[Any] ,__lowerCamelCase : Optional[Any] ,*__lowerCamelCase : List[Any] ,**__lowerCamelCase : int ): '''simple docstring''' requires_backends(cls ,['''vision''', '''timm'''] ) from ...models.timm_backbone import TimmBackboneConfig a = kwargs.pop('''config''' ,TimmBackboneConfig() ) a = kwargs.pop('''use_timm_backbone''' ,__lowerCamelCase ) if not use_timm: raise ValueError('''use_timm_backbone must be True for timm backbones''' ) a = kwargs.pop('''num_channels''' ,config.num_channels ) a = kwargs.pop('''features_only''' ,config.features_only ) a = kwargs.pop('''use_pretrained_backbone''' ,config.use_pretrained_backbone ) a = kwargs.pop('''out_indices''' ,config.out_indices ) a = TimmBackboneConfig( backbone=__lowerCamelCase ,num_channels=__lowerCamelCase ,features_only=__lowerCamelCase ,use_pretrained_backbone=__lowerCamelCase ,out_indices=__lowerCamelCase ,) return super()._from_config(__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ,__lowerCamelCase : List[Any] ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Dict ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=None ,**__lowerCamelCase : Dict ): '''simple docstring''' a = return_dict if return_dict is not None else self.config.use_return_dict a = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) a = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('''Cannot output attentions for timm backbones at the moment''' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone a = self._all_layers a = self._backbone(__lowerCamelCase ,**__lowerCamelCase ) a = self._return_layers a = tuple(hidden_states[i] for i in self.out_indices ) else: a = self._backbone(__lowerCamelCase ,**__lowerCamelCase ) a = None a = tuple(__lowerCamelCase ) a = tuple(__lowerCamelCase ) if hidden_states is not None else None if not return_dict: a = (feature_maps,) if output_hidden_states: a = output + (hidden_states,) return output return BackboneOutput(feature_maps=__lowerCamelCase ,hidden_states=__lowerCamelCase ,attentions=__lowerCamelCase )
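# Hedged usage sketch (requires `timm`; "resnet18" stands in for any valid
# timm model name): the wrapper exposes a timm network through the HF backbone
# API, returning the last stage's feature map by default (out_indices=(-1,)).
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
backbone = TimmBackbone(config)
features = backbone(torch.zeros(1, 3, 224, 224)).feature_maps
print([tuple(f.shape) for f in features])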
from __future__ import annotations from collections.abc import Sequence from typing import Literal def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> str | Literal[False]: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count += 1 a = '''_''' if count > 1: return False else: return "".join(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> list[str]: """simple docstring""" a = [] while True: a = ['''$'''] * len(snake_case_ ) a = [] for i in range(len(snake_case_ ) ): for j in range(i + 1, len(snake_case_ ) ): a = compare_string(binary[i], binary[j] ) if k is False: a = '''*''' a = '''*''' temp.append('''X''' ) for i in range(len(snake_case_ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(snake_case_ ) == 0: return pi a = list(set(snake_case_ ) ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] for minterm in minterms: a = '''''' for _ in range(snake_case_ ): a = str(minterm % 2 ) + string minterm //= 2 temp.append(snake_case_ ) return temp def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> bool: """simple docstring""" a = list(snake_case_ ) a = list(snake_case_ ) a = 0 for i in range(len(snake_case_ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[str]: """simple docstring""" a = [] a = [0] * len(snake_case_ ) for i in range(len(chart[0] ) ): a = 0 a = -1 for j in range(len(snake_case_ ) ): if chart[j][i] == 1: count += 1 a = j if count == 1: a = 1 for i in range(len(snake_case_ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(snake_case_ ) ): a = 0 temp.append(prime_implicants[i] ) while True: a = 0 a = -1 a = 0 for i in range(len(snake_case_ ) ): a = chart[i].count(1 ) if count_n > max_n: a = count_n a = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(snake_case_ ) ): a = 0 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> list[list[int]]: """simple docstring""" a = [[0 for x in range(len(snake_case_ ) )] for x in range(len(snake_case_ ) )] for i in range(len(snake_case_ ) ): a = prime_implicants[i].count('''_''' ) for j in range(len(snake_case_ ) ): if is_for_table(prime_implicants[i], binary[j], snake_case_ ): a = 1 return chart def SCREAMING_SNAKE_CASE__ ( ) -> None: """simple docstring""" a = int(input('''Enter the no. of variables\n''' ) ) a = [ float(snake_case_ ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] a = decimal_to_binary(snake_case_, snake_case_ ) a = check(snake_case_ ) print('''Prime Implicants are:''' ) print(snake_case_ ) a = prime_implicant_chart(snake_case_, snake_case_ ) a = selection(snake_case_, snake_case_ ) print('''Essential Prime Implicants are:''' ) print(snake_case_ ) if __name__ == "__main__": import doctest doctest.testmod() main()
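# Worked micro-example of the merging rule above: over two variables, minterms
# 0 and 1 are "00" and "01"; they differ in exactly one bit, so they combine
# into the single prime implicant "0_" (the '_' marks the eliminated variable).
merged = "".join(a if a == b else "_" for a, b in zip("00", "01"))
assert merged == "0_"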
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCamelCase__ : List[str] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = ['pixel_values'] def __init__( self : List[str] ,__lowerCamelCase : bool = True ,__lowerCamelCase : Dict[str, int] = None ,__lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,__lowerCamelCase : bool = True ,__lowerCamelCase : Dict[str, int] = None ,__lowerCamelCase : bool = True ,__lowerCamelCase : Union[int, float] = 1 / 2_55 ,__lowerCamelCase : bool = True ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : bool = True ,**__lowerCamelCase : Optional[int] ,): '''simple docstring''' super().__init__(**__lowerCamelCase ) a = size if size is not None else {'''shortest_edge''': 2_24} a = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ) a = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} a = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ,param_name='''crop_size''' ) a = do_resize a = size a = resample a = do_center_crop a = crop_size a = do_rescale a = rescale_factor a = do_normalize a = image_mean if image_mean is not None else OPENAI_CLIP_MEAN a = image_std if image_std is not None else OPENAI_CLIP_STD a = do_convert_rgb def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Dict[str, int] ,__lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : Any ,): '''simple docstring''' a = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) a = get_resize_output_image_size(__lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=__lowerCamelCase ) return resize(__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Dict[str, int] ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : str ,): '''simple docstring''' a = get_size_dict(__lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Union[int, float] ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : Tuple ,): '''simple docstring''' return rescale(__lowerCamelCase ,scale=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : np.ndarray ,__lowerCamelCase : Union[float, List[float]] ,__lowerCamelCase : Union[float, List[float]] ,__lowerCamelCase : Optional[Union[str, ChannelDimension]] = None ,**__lowerCamelCase : List[Any] ,): '''simple docstring''' return normalize(__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : ImageInput ,__lowerCamelCase : bool = None ,__lowerCamelCase : Dict[str, int] = None ,__lowerCamelCase : PILImageResampling = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : int = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : float = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : Optional[Union[float, List[float]]] = None ,__lowerCamelCase : bool = None ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST ,**__lowerCamelCase : Tuple ,): '''simple docstring''' a = do_resize if do_resize is not None else self.do_resize a = size if size is not None else self.size a = get_size_dict(__lowerCamelCase ,param_name='''size''' ,default_to_square=__lowerCamelCase ) a = resample if resample is not None else self.resample a = do_center_crop if do_center_crop is not None else self.do_center_crop a = crop_size if crop_size is not None else self.crop_size a = get_size_dict(__lowerCamelCase ,param_name='''crop_size''' ,default_to_square=__lowerCamelCase ) a = do_rescale if do_rescale is not None else self.do_rescale a = rescale_factor if rescale_factor is not None else self.rescale_factor a = do_normalize if do_normalize is not None else self.do_normalize a = image_mean if image_mean is not None else self.image_mean a = image_std if image_std is not None else self.image_std a = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a = make_list_of_images(__lowerCamelCase ) if not valid_images(__lowerCamelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: a = [convert_to_rgb(__lowerCamelCase ) for image in images] # All transformations expect numpy arrays. 
a = [to_numpy_array(__lowerCamelCase ) for image in images] if do_resize: a = [self.resize(image=__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ) for image in images] if do_center_crop: a = [self.center_crop(image=__lowerCamelCase ,size=__lowerCamelCase ) for image in images] if do_rescale: a = [self.rescale(image=__lowerCamelCase ,scale=__lowerCamelCase ) for image in images] if do_normalize: a = [self.normalize(image=__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ) for image in images] a = [to_channel_dimension_format(__lowerCamelCase ,__lowerCamelCase ) for image in images] a = {'''pixel_values''': images} return BatchFeature(data=__lowerCamelCase ,tensor_type=__lowerCamelCase )
369
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a depth map for an input image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # remember the original size so the depth map can be resized back to it
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
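# A short usage sketch for the pipeline above (the checkpoint and image URL
# are illustrative; any Hub checkpoint with depth-estimation support should work):

from transformers import pipeline

def _demo_depth_estimation() -> None:
    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(result["depth"].size)              # PIL image at the original (width, height)
    print(result["predicted_depth"].shape)   # raw depth tensor from the model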
330
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowerCamelCase_ : def __init__( self : Optional[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int]=13 ,__lowerCamelCase : Union[str, Any]=10 ,__lowerCamelCase : List[str]=3 ,__lowerCamelCase : int=2 ,__lowerCamelCase : List[Any]=2 ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : Dict=True ,__lowerCamelCase : List[str]=32 ,__lowerCamelCase : List[str]=5 ,__lowerCamelCase : Union[str, Any]=4 ,__lowerCamelCase : List[str]=37 ,__lowerCamelCase : Optional[Any]="gelu" ,__lowerCamelCase : Optional[Any]=0.1 ,__lowerCamelCase : str=0.1 ,__lowerCamelCase : List[str]=10 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : Union[str, Any]="divided_space_time" ,__lowerCamelCase : Optional[int]=None ,): '''simple docstring''' a = parent a = batch_size a = image_size a = num_channels a = patch_size a = num_frames a = is_training a = use_labels a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = attention_type a = initializer_range a = scope a = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token a = (image_size // patch_size) ** 2 a = (num_frames) * self.num_patches_per_frame + 1 def SCREAMING_SNAKE_CASE_ ( self : Dict ): '''simple docstring''' a = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] ,self.num_labels ) a = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = TimesformerConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,) a = self.num_labels return config def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : Dict ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ): '''simple docstring''' a = TimesformerModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE_ ( self 
: Optional[int] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Any ,__lowerCamelCase : int ): '''simple docstring''' a = TimesformerForVideoClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase ) # verify the logits shape a = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = self.prepare_config_and_inputs() a , a , a = config_and_inputs a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase_ ( a_ , a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE_ = ( {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = TimesformerModelTester(self ) a = ConfigTester( self ,config_class=__lowerCamelCase ,has_text_modality=__lowerCamelCase ,hidden_size=37 ) def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ,__lowerCamelCase : List[str] ,__lowerCamelCase : str=False ): '''simple docstring''' a = copy.deepcopy(__lowerCamelCase ) if return_labels: if model_class in get_values(__lowerCamelCase ): a = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=__lowerCamelCase ) return inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''TimeSformer does not use inputs_embeds''' ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' pass def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__lowerCamelCase ,nn.Linear ) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__lowerCamelCase ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__lowerCamelCase ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = TimesformerModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' if not self.has_attentions: pass else: a , a = self.model_tester.prepare_config_and_inputs_for_common() a = True for model_class in self.all_model_classes: 
a = self.model_tester.seq_length a = self.model_tester.num_frames a = True a = False a = True a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) ) a = outputs.attentions self.assertEqual(len(__lowerCamelCase ) ,self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] a = True a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) ) a = outputs.attentions self.assertEqual(len(__lowerCamelCase ) ,self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,) a = len(__lowerCamelCase ) # Check attention is always last and order is fine a = True a = True a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) ) self.assertEqual(out_len + 1 ,len(__lowerCamelCase ) ) a = outputs.attentions self.assertEqual(len(__lowerCamelCase ) ,self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' def check_hidden_states_output(__lowerCamelCase : Any ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Optional[int] ): a = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): a = model(**self._prepare_for_class(__lowerCamelCase ,__lowerCamelCase ) ) a = outputs.hidden_states a = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__lowerCamelCase ) ,__lowerCamelCase ) a = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,) a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = True check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] a = True check_hidden_states_output(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''', filename='''eating_spaghetti.npy''', repo_type='''dataset''' ) a = np.load(snake_case_ ) return list(snake_case_ ) @require_torch @require_vision class lowerCamelCase_ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to( __lowerCamelCase ) a = self.default_image_processor a = prepare_video() a = 
image_processor(video[:8] ,return_tensors='''pt''' ).to(__lowerCamelCase ) # forward pass with torch.no_grad(): a = model(**__lowerCamelCase ) # verify the logits a = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape ,__lowerCamelCase ) a = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(__lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCamelCase ,atol=1e-4 ) )
370
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})

    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
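# A tiny usage sketch of the template above (the column name is illustrative):

def _demo_language_modeling_template() -> None:
    template = LanguageModeling(text_column="content")
    print(template.column_mapping)  # {'content': 'text'}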
330
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: UpperCamelCase__ : Union[str, Any] = None UpperCamelCase__ : List[Any] = logging.get_logger(__name__) UpperCamelCase__ : int = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} UpperCamelCase__ : int = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""", }, } UpperCamelCase__ : List[str] = { """camembert-base""": 512, } UpperCamelCase__ : List[Any] = """▁""" class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] SCREAMING_SNAKE_CASE_ = CamembertTokenizer def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : List[Any]=None ,__lowerCamelCase : List[Any]="<s>" ,__lowerCamelCase : Optional[Any]="</s>" ,__lowerCamelCase : Union[str, Any]="</s>" ,__lowerCamelCase : int="<s>" ,__lowerCamelCase : Any="<unk>" ,__lowerCamelCase : Optional[int]="<pad>" ,__lowerCamelCase : Optional[int]="<mask>" ,__lowerCamelCase : str=["<s>NOTUSED", "</s>NOTUSED"] ,**__lowerCamelCase : Union[str, Any] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else mask_token super().__init__( __lowerCamelCase ,tokenizer_file=__lowerCamelCase ,bos_token=__lowerCamelCase ,eos_token=__lowerCamelCase ,sep_token=__lowerCamelCase ,cls_token=__lowerCamelCase ,unk_token=__lowerCamelCase ,pad_token=__lowerCamelCase ,mask_token=__lowerCamelCase ,additional_special_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = vocab_file a = False if not self.vocab_file else True def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] a = [self.cls_token_id] a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : List[int] ,__lowerCamelCase : Optional[List[int]] = None ): '''simple docstring''' a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def SCREAMING_SNAKE_CASE_ ( self : Dict ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(__lowerCamelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return a = os.path.join( __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != 
os.path.abspath(__lowerCamelCase ): copyfile(self.vocab_file ,__lowerCamelCase ) return (out_vocab_file,)
371
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
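# A small sketch of the two config classes above (override values illustrative):

def _demo_yolos_config() -> None:
    config = YolosConfig(num_detection_tokens=50)
    print(config.model_type, config.hidden_size)  # yolos 768
    onnx_config = YolosOnnxConfig(config)
    print(dict(onnx_config.inputs))  # {'pixel_values': {0: 'batch', ...}}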
330
0
import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> list:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
350
def apply_table(inp, table):
    """Permute the bits of `inp` according to a 1-indexed `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up a 4-bit value in S-box `s`: outer bits pick the row, inner bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One Feistel round of simplified DES."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between rounds
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption (apply the round keys in reverse order)
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
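# A few self-contained checks of the bit-level helpers above (values illustrative):

def _demo_sdes_primitives() -> None:
    assert apply_table("abcd", [2, 4, 3, 1]) == "bdca"   # 1-indexed permutation
    assert left_shift("10000") == "00001"                # circular shift by one
    assert xor("0101", "0011") == "0110"                 # bitwise xor of bit strings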
330
0
import math


def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...).

    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in blocks: block n contributes 3 * 2 ** (n - 1)
        # numbers, each 2 ** (n + 1) larger than its predecessor.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
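# A quick sanity sketch of the sequence produced above:

def _demo_proth() -> None:
    assert [proth(n) for n in range(1, 7)] == [3, 5, 9, 13, 17, 25]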
351
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])

        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )

        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
330
0
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x to the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            # evaluate the 2 values popped from the stack & push the result
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(
                x.rjust(8),
                ("push(" + a + x + b + ")").ljust(12),
                ",".join(stack),
                sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
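# A non-interactive sketch (expression illustrative); `solve` also prints its
# step-by-step table as a side effect:

def _demo_postfix() -> None:
    assert solve("2 3 4 * +".split()) == 14  # 2 + (3 * 4)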
352
import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase__ : Union[str, Any] = 16 UpperCamelCase__ : Dict = 32 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ = 1_6 ) -> Tuple: """simple docstring""" a = AutoTokenizer.from_pretrained('''bert-base-cased''' ) a = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(snake_case_ ): # max_length=None => use the model max length (it's actually the default) a = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=snake_case_, max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): a = datasets.map( snake_case_, batched=snake_case_, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library a = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(snake_case_ ): # On TPU it's best to pad everything to the same length or training will be very slow. a = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": a = 1_6 elif accelerator.mixed_precision != "no": a = 8 else: a = None return tokenizer.pad( snake_case_, padding='''longest''', max_length=snake_case_, pad_to_multiple_of=snake_case_, return_tensors='''pt''', ) # Instantiate dataloaders. 
a = DataLoader( tokenized_datasets['''train'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) a = DataLoader( tokenized_datasets['''validation'''], shuffle=snake_case_, collate_fn=snake_case_, batch_size=snake_case_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase__ : int = mocked_dataloaders # noqa: F811 def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''', snake_case_ ) == "1": a = 2 # Initialize accelerator a = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs a = config['''lr'''] a = int(config['''num_epochs'''] ) a = int(config['''seed'''] ) a = int(config['''batch_size'''] ) a = evaluate.load('''glue''', '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=snake_case_ ) def inner_training_loop(snake_case_ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) a = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=snake_case_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). a = model.to(accelerator.device ) # Instantiate optimizer a = AdamW(params=model.parameters(), lr=snake_case_ ) a , a = get_dataloaders(snake_case_, snake_case_ ) # Instantiate scheduler a = get_linear_schedule_with_warmup( optimizer=snake_case_, num_warmup_steps=1_0_0, num_training_steps=(len(snake_case_ ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. a , a , a , a , a = accelerator.prepare( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) # Now we train the model for epoch in range(snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) a = model(**snake_case_ ) a = outputs.loss accelerator.backward(snake_case_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): a = model(**snake_case_ ) a = outputs.logits.argmax(dim=-1 ) a , a = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case_, references=snake_case_, ) a = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""", snake_case_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''', type=snake_case_, default=snake_case_, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' ) a = parser.parse_args() a = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6} training_function(snake_case_, snake_case_ ) if __name__ == "__main__": main()
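# A minimal standalone sketch of the `find_executable_batch_size` decorator that
# drives this script's OOM recovery (the failure threshold below is simulated):

from accelerate.utils import find_executable_batch_size

def _demo_find_executable_batch_size() -> None:
    @find_executable_batch_size(starting_batch_size=128)
    def train(batch_size):
        if batch_size > 16:  # pretend anything above 16 exhausts memory
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    print(train())  # retries at 128 -> 64 -> 32 -> 16 and returns 16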
330
0
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing the data, its parent and its rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing `data`
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of the set containing `data` (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper for union: attach the lower-rank tree under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing `data1` and `data2`
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours with edge weights
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm to generate a minimum spanning tree (MST) of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
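# A small end-to-end sketch (edge weights illustrative):

def _demo_kruskal() -> None:
    graph = GraphUndirectedWeighted[int]()
    graph.add_edge(1, 2, 1)
    graph.add_edge(2, 3, 2)
    graph.add_edge(1, 3, 3)  # heaviest edge; left out of the MST
    mst = graph.kruskal()
    print(mst.connections)  # {1: {2: 1}, 2: {1: 1, 3: 2}, 3: {2: 2}}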
353
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ : Optional[int] = logging.get_logger(__name__) UpperCamelCase__ : str = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } UpperCamelCase__ : Optional[Any] = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[Any]: """simple docstring""" for attribute in key.split('''.''' ): a = getattr(snake_case_, snake_case_ ) if weight_type is not None: a = getattr(snake_case_, snake_case_ ).shape else: a = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": a = value elif weight_type == "weight_g": a = value elif weight_type == "weight_v": a = value elif weight_type == "bias": a = value else: a = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = [] a = fairseq_model.state_dict() a = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): a = False if "conv_layers" in name: load_conv_layer( snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''', ) a = True else: for key, mapped_key in MAPPING.items(): a = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue a = True if "*" in mapped_key: a = name.split(snake_case_ )[0].split('''.''' )[-2] a = mapped_key.replace('''*''', snake_case_ ) if "weight_g" in name: a = '''weight_g''' elif "weight_v" in name: a = '''weight_v''' elif "bias" in name: a = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj a = '''weight''' else: a = None set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Union[str, Any]: """simple docstring""" a = full_name.split('''conv_layers.''' )[-1] a = name.split('''.''' ) a = int(items[0] ) a = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) a = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Union[str, Any]: """simple docstring""" if config_path is not None: a = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: a = UniSpeechSatConfig() a = '''''' if is_finetuned: a = 
UniSpeechSatForCTC(snake_case_ ) else: a = UniSpeechSatForPreTraining(snake_case_ ) a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) a = model[0].eval() recursively_load_weights(snake_case_, snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": UpperCamelCase__ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) UpperCamelCase__ : int = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
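# Example invocation of the conversion script above, using the flags its
# argument parser defines (the script file name and paths are placeholders):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat_checkpoint.pt \
#       --dict_path /path/to/dict \
#       --config_path /path/to/config.json \
#       --pytorch_dump_folder_path ./unispeech-sat-converted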
330
0
import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCamelCase__ : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase__ : Dict = direct_transformers_import(PATH_TO_TRANSFORMERS) UpperCamelCase__ : List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING UpperCamelCase__ : int = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file """LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure 
SPECIAL_CASES_TO_ALLOW.update( { """CLIPSegConfig""": True, """DeformableDetrConfig""": True, """DetaConfig""": True, """DinatConfig""": True, """DonutSwinConfig""": True, """EfficientFormerConfig""": True, """FSMTConfig""": True, """JukeboxConfig""": True, """LayoutLMv2Config""": True, """MaskFormerSwinConfig""": True, """MT5Config""": True, """NatConfig""": True, """OneFormerConfig""": True, """PerceiverConfig""": True, """RagConfig""": True, """SpeechT5Config""": True, """SwinConfig""": True, """Swin2SRConfig""": True, """Swinv2Config""": True, """SwitchTransformersConfig""": True, """TableTransformerConfig""": True, """TapasConfig""": True, """TransfoXLConfig""": True, """UniSpeechConfig""": True, """UniSpeechSatConfig""": True, """WavLMConfig""": True, """WhisperConfig""": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) """JukeboxPriorConfig""": True, # TODO: @Younes (for `is_decoder`) """Pix2StructTextConfig""": True, } ) def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_ ) -> Any: """simple docstring""" a = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"""config.{attribute}""" in modeling_source or f"""getattr(config, \"{attribute}\"""" in modeling_source or f"""getattr(self.config, \"{attribute}\"""" in modeling_source ): a = True # Deal with multi-line cases elif ( re.search( rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""", snake_case_, ) is not None ): a = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: a = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files a = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] a = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed a = True if not attribute_used: a = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: a = True elif attribute in ["tie_word_embeddings"] and default_value is False: a = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: a = True elif attribute.endswith('''_token_id''' ): a = True # configuration class specific cases if not case_allowed: a = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] ) a = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[Any]: """simple docstring""" a = dict(inspect.signature(config_class.__init__ ).parameters ) a = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] a = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass a = {} if len(config_class.attribute_map ) > 0: a = {v: k for k, v in 
config_class.attribute_map.items()} # Get the path to modeling source files a = inspect.getsourcefile(snake_case_ ) a = os.path.dirname(snake_case_ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. a = [os.path.join(snake_case_, snake_case_ ) for fn in os.listdir(snake_case_ ) if fn.startswith('''modeling_''' )] # Get the source code strings a = [] for path in modeling_paths: if os.path.isfile(snake_case_ ): with open(snake_case_ ) as fp: modeling_sources.append(fp.read() ) a = [] for config_param, default_value in zip(snake_case_, snake_case_ ): # `attributes` here is all the variant names for `config_param` a = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(snake_case_, snake_case_, snake_case_, snake_case_ ): unused_attributes.append(attributes[0] ) return sorted(snake_case_ ) def SCREAMING_SNAKE_CASE__ ( ) -> Tuple: """simple docstring""" a = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) a = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ), lambda snake_case_ : inspect.isclass(snake_case_ ) and issubclass(snake_case_, snake_case_ ) and inspect.getmodule(snake_case_ ) == inspect.getmodule(_config_class ), ) ] for config_class in config_classes_in_module: a = check_config_attributes_being_used(snake_case_ ) if len(snake_case_ ) > 0: a = unused_attributes if len(snake_case_ ) > 0: a = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += f"""{name}: {attributes}\n""" raise ValueError(snake_case_ ) if __name__ == "__main__": check_config_attributes()
354
import pytest

from datasets import inspect_metric, list_metrics, load_metric


@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> List[str]:
    """simple docstring"""
    monkeypatch.setattr('''datasets.utils.deprecation_utils._emitted_deprecation_warnings''', set() )


@pytest.fixture
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
    """simple docstring"""

    class lowerCamelCase_ :
        def __init__( self : Dict ,__lowerCamelCase : List[str] ):
            '''simple docstring'''
            a = metric_id

    class lowerCamelCase_ :
        SCREAMING_SNAKE_CASE_ = [MetricMock(a_ ) for metric_id in ['accuracy', 'mse', 'precision', 'codeparrot/apps_metric']]

        def SCREAMING_SNAKE_CASE_ ( self : Dict ):
            '''simple docstring'''
            return self._metrics

    monkeypatch.setattr('''datasets.inspect.huggingface_hub''', HfhMock() )


@pytest.mark.parametrize(
    '''func, args''', [(load_metric, ('''metrics/mse''',)), (list_metrics, ()), (inspect_metric, ('''metrics/mse''', '''tmp_path'''))]
)
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Tuple:
    """simple docstring"""
    if "tmp_path" in args:
        a = tuple(arg if arg != '''tmp_path''' else tmp_path for arg in args )
    with pytest.warns(snake_case_, match='''https://huggingface.co/docs/evaluate''' ):
        func(*snake_case_ )
330
0
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCamelCase_ : def __init__( self : Union[str, Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Dict=99 ,__lowerCamelCase : Any=13 ,__lowerCamelCase : List[Any]=7 ,__lowerCamelCase : Union[str, Any]=9 ,__lowerCamelCase : List[Any]=True ,__lowerCamelCase : List[str]=True ,__lowerCamelCase : List[Any]=False ,__lowerCamelCase : Dict=32 ,__lowerCamelCase : List[Any]=5 ,__lowerCamelCase : Optional[Any]=4 ,__lowerCamelCase : int=37 ,__lowerCamelCase : List[Any]=8 ,__lowerCamelCase : List[Any]=0.1 ,__lowerCamelCase : Optional[int]=0.002 ,__lowerCamelCase : Tuple=1 ,__lowerCamelCase : Tuple=0 ,__lowerCamelCase : Tuple=0 ,__lowerCamelCase : Any=None ,__lowerCamelCase : Tuple=None ,): '''simple docstring''' a = parent a = batch_size a = encoder_seq_length a = decoder_seq_length # For common tests a = self.decoder_seq_length a = is_training a = use_attention_mask a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = d_ff a = relative_attention_num_buckets a = dropout_rate a = initializer_factor a = eos_token_id a = pad_token_id a = decoder_start_token_id a = None a = decoder_layers def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' return TaConfig.from_pretrained('''google/umt5-base''' ) def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : int ,__lowerCamelCase : Any ,__lowerCamelCase : Union[str, Any]=None ,__lowerCamelCase : Dict=None ,__lowerCamelCase : Optional[int]=None ,__lowerCamelCase : Union[str, Any]=None ,__lowerCamelCase : int=None ,): '''simple docstring''' if attention_mask is None: a = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: a = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: a = torch.ones(config.num_hidden_layers ,config.num_attention_heads ,device=__lowerCamelCase ) if decoder_head_mask is None: a = torch.ones(config.num_decoder_layers ,config.num_attention_heads ,device=__lowerCamelCase ) if cross_attn_head_mask is None: a = torch.ones( config.num_decoder_layers ,config.num_attention_heads ,device=__lowerCamelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = ids_tensor([self.batch_size, self.encoder_seq_length] ,self.vocab_size ) a = ids_tensor([self.batch_size, self.decoder_seq_length] ,self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and 
which in turn results in # position_ids being off by num_pad_tokens in past input a = input_ids.clamp(self.pad_token_id + 1 ) a = decoder_input_ids.clamp(self.pad_token_id + 1 ) a = self.get_config() a = config.num_attention_heads a = self.prepare_inputs_dict(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) return config, input_dict def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a , a = self.prepare_config_and_inputs() return config, inputs_dict def SCREAMING_SNAKE_CASE_ ( self : Tuple ): '''simple docstring''' return TaConfig( vocab_size=1_66 ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): '''simple docstring''' return TaConfig( vocab_size=self.vocab_size ,d_model=self.hidden_size ,d_ff=self.d_ff ,d_kv=self.hidden_size // self.num_attention_heads ,num_layers=self.num_hidden_layers ,num_decoder_layers=self.decoder_layers ,num_heads=self.num_attention_heads ,relative_attention_num_buckets=self.relative_attention_num_buckets ,dropout_rate=self.dropout_rate ,initializer_factor=self.initializer_factor ,eos_token_id=self.eos_token_id ,bos_token_id=self.pad_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.decoder_start_token_id ,) def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Any ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[Any] ,): '''simple docstring''' a = UMTaModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model( input_ids=__lowerCamelCase ,decoder_input_ids=__lowerCamelCase ,attention_mask=__lowerCamelCase ,decoder_attention_mask=__lowerCamelCase ,) a = model(input_ids=__lowerCamelCase ,decoder_input_ids=__lowerCamelCase ) a = result.last_hidden_state a = result.past_key_values a = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() ,(self.batch_size, self.encoder_seq_length, self.hidden_size) ) self.parent.assertEqual(decoder_output.size() ,(self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(__lowerCamelCase ) ,config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) ,4 ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Union[str, Any] ,): '''simple docstring''' a = UMTaModel(config=__lowerCamelCase ).get_decoder().to(__lowerCamelCase ).eval() # first forward pass a = model(__lowerCamelCase ,use_cache=__lowerCamelCase ) a = model(__lowerCamelCase ) a = model(__lowerCamelCase ,use_cache=__lowerCamelCase ) self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) ) self.parent.assertTrue(len(__lowerCamelCase ) == len(__lowerCamelCase ) + 1 ) a , a = 
outputs.to_tuple() # create hypothetical next token and extent to next_input_ids a = ids_tensor((self.batch_size, 1) ,config.vocab_size ) # append to next input_ids and a = torch.cat([input_ids, next_tokens] ,dim=-1 ) a = model(__lowerCamelCase )['''last_hidden_state'''] a = model(__lowerCamelCase ,past_key_values=__lowerCamelCase )['''last_hidden_state'''] # select random slice a = ids_tensor((1,) ,output_from_past.shape[-1] ).item() a = output_from_no_past[:, -1, random_slice_idx].detach() a = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__lowerCamelCase ,__lowerCamelCase ,atol=1e-3 ) ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : str ,__lowerCamelCase : str ,): '''simple docstring''' a = UMTaModel(config=__lowerCamelCase ).to(__lowerCamelCase ).half().eval() a = model(**__lowerCamelCase )['''last_hidden_state'''] self.parent.assertFalse(torch.isnan(__lowerCamelCase ).any().item() ) @require_torch class lowerCamelCase_ ( a_ , a_ , a_ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ = (UMTaForConditionalGeneration,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ = ( { 'conversational': UMTaForConditionalGeneration, 'feature-extraction': UMTaModel, 'summarization': UMTaForConditionalGeneration, 'text2text-generation': UMTaForConditionalGeneration, 'translation': UMTaForConditionalGeneration, 'question-answering': UMTaForQuestionAnswering, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True # The small UMT5 model needs higher percentages for CPU/MP tests SCREAMING_SNAKE_CASE_ = [0.8, 0.9] def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): '''simple docstring''' a = UMTaModelTester(self ) @unittest.skip('''Test has a segmentation fault on torch 1.8.0''' ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() a = UMTaModel(config_and_inputs[0] ).to(__lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( __lowerCamelCase ,(config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) ,F"""{tmpdirname}/t5_test.onnx""" ,export_params=__lowerCamelCase ,opset_version=9 ,input_names=['''input_ids''', '''decoder_input_ids'''] ,) @unittest.skipIf(torch_device == '''cpu''' ,'''Cant do half precision''' ) def SCREAMING_SNAKE_CASE_ ( self : Any ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*__lowerCamelCase ) def SCREAMING_SNAKE_CASE_ ( self : int ): '''simple docstring''' a = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions'''] a = self.model_tester.prepare_config_and_inputs() a = config_and_inputs[0] a = UMTaForConditionalGeneration(__lowerCamelCase ).eval() model.to(__lowerCamelCase ) a = { '''head_mask''': torch.zeros(config.num_layers ,config.num_heads ,device=__lowerCamelCase ), '''decoder_head_mask''': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__lowerCamelCase ), '''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers ,config.num_heads ,device=__lowerCamelCase ), } for attn_name, (name, mask) in zip(__lowerCamelCase ,head_masking.items() ): a = {name: mask} # Explicitly pass decoder_head_mask as it is 
required from T5 model when head_mask specified if name == "head_mask": a = torch.ones( config.num_decoder_layers ,config.num_heads ,device=__lowerCamelCase ) a = model.generate( config_and_inputs[1]['''input_ids'''] ,num_beams=1 ,max_length=3 ,output_attentions=__lowerCamelCase ,return_dict_in_generate=__lowerCamelCase ,**__lowerCamelCase ,) # We check the state of decoder_attentions and cross_attentions just from the last step a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) ,0.0 ) @unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase_ ( unittest.TestCase ): @slow @unittest.skip( '''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' ) def SCREAMING_SNAKE_CASE_ ( self : List[str] ): '''simple docstring''' a = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' ,return_dict=__lowerCamelCase ).to(__lowerCamelCase ) a = AutoTokenizer.from_pretrained('''google/umt5-small''' ,use_fast=__lowerCamelCase ,legacy=__lowerCamelCase ) a = [ '''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''', '''No se como puedo <extra_id_0>.''', '''This is the reason why we <extra_id_0> them.''', '''The <extra_id_0> walks in <extra_id_1>, seats''', '''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''', ] a = tokenizer(__lowerCamelCase ,return_tensors='''pt''' ,padding=__lowerCamelCase ).input_ids # fmt: off a = torch.tensor( [ [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1], ] ) # fmt: on torch.testing.assert_allclose(__lowerCamelCase ,__lowerCamelCase ) a = model.generate(input_ids.to(__lowerCamelCase ) ) a = [ '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''', '''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', '''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. 
This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''', ] a = tokenizer.batch_decode(__lowerCamelCase ) self.assertEqual(__lowerCamelCase ,__lowerCamelCase )
355
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ : str = logging.get_logger(__name__)

UpperCamelCase__ : Optional[int] = {
    """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
    """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}


class lowerCamelCase_ ( a_ ):
    SCREAMING_SNAKE_CASE_ = 'luke'

    def __init__( self : Dict ,__lowerCamelCase : Optional[Any]=5_02_67 ,__lowerCamelCase : str=50_00_00 ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : int=2_56 ,__lowerCamelCase : Optional[int]=12 ,__lowerCamelCase : Tuple=12 ,__lowerCamelCase : Any=30_72 ,__lowerCamelCase : Any="gelu" ,__lowerCamelCase : Any=0.1 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : Tuple=5_12 ,__lowerCamelCase : int=2 ,__lowerCamelCase : Optional[int]=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=True ,__lowerCamelCase : Tuple=None ,__lowerCamelCase : Any=1 ,__lowerCamelCase : Dict=0 ,__lowerCamelCase : Any=2 ,**__lowerCamelCase : str ,):
        '''simple docstring'''
        super().__init__(pad_token_id=__lowerCamelCase ,bos_token_id=__lowerCamelCase ,eos_token_id=__lowerCamelCase ,**__lowerCamelCase )
        a = vocab_size
        a = entity_vocab_size
        a = hidden_size
        a = entity_emb_size
        a = num_hidden_layers
        a = num_attention_heads
        a = hidden_act
        a = intermediate_size
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = max_position_embeddings
        a = type_vocab_size
        a = initializer_range
        a = layer_norm_eps
        a = use_entity_aware_attention
        a = classifier_dropout
330
0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    WavaVecaConformerConfig,
    WavaVecaConformerForCTC,
    WavaVecaConformerForPreTraining,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
UpperCamelCase__ : List[str] = logging.get_logger(__name__)

UpperCamelCase__ : Optional[Any] = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
    """self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
    """self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
    """self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
    """self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
    """self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
    """self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
    """self_attn.rotary_emb""": """encoder.embed_positions""",
    """self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
    """conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
    """conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
    """conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
    """conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
    """conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
    """ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
    """ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
    """ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
    """ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
    """ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
    """ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """mask_emb""": """masked_spec_embed""",
}
UpperCamelCase__ : Optional[int] = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
]


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> str:
    """simple docstring"""
    for attribute in key.split('''.''' ):
        a = getattr(snake_case_, snake_case_ )

    if weight_type is not None:
        a = getattr(snake_case_, snake_case_ ).shape
    else:
        a = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        a = value
    elif weight_type == "weight_g":
        a = value
    elif weight_type == "weight_v":
        a = value
    elif weight_type == "bias":
        a = value
    elif weight_type == "running_mean":
        a = value
    elif weight_type == "running_var":
        a = value
    elif weight_type == "num_batches_tracked":
        a = value
    elif weight_type == "inv_freq":
        a = value
    else:
        a = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Dict:
    """simple docstring"""
    a = []
    a = fairseq_model.state_dict()

    a = hf_model.wavaveca_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        a = False
        if "conv_layers" in name:
            load_conv_layer(
                snake_case_, snake_case_, snake_case_, snake_case_, hf_model.config.feat_extract_norm == '''group''',
            )
            a = True
        else:
            for key, mapped_key in MAPPING.items():
                a = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    a = True
                    if "*" in mapped_key:
                        a = name.split(snake_case_ )[0].split('''.''' )[-2]
                        a = mapped_key.replace('''*''', snake_case_ )
                    if "pos_bias_u" in name:
                        a = None
                    elif "pos_bias_v" in name:
                        a = None
                    elif "weight_g" in name:
                        a = '''weight_g'''
                    elif "weight_v" in name:
                        a = '''weight_v'''
                    elif "bias" in name:
                        a = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        a = '''weight'''
                    elif "running_mean" in name:
                        a = '''running_mean'''
                    elif "inv_freq" in name:
                        a = '''inv_freq'''
                    elif "running_var" in name:
                        a = '''running_var'''
                    elif "num_batches_tracked" in name:
                        a = '''num_batches_tracked'''
                    else:
                        a = None
                    set_recursively(snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ )
                continue
        if not is_used:
            unused_weights.append(snake_case_ )

    logger.warning(f"""Unused weights: {unused_weights}""" )


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> List[str]:
    """simple docstring"""
    a = full_name.split('''conv_layers.''' )[-1]
    a = name.split('''.''' )
    a = int(items[0] )
    a = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
                )
            a = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
                )
            a = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."""
                )
            a = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
                )
            a = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(snake_case_ )


@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_=None, snake_case_=None, snake_case_=True ) -> Dict:
    """simple docstring"""
    if config_path is not None:
        a = WavaVecaConformerConfig.from_pretrained(snake_case_, hidden_act='''swish''' )
    else:
        a = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        a = '''rotary'''

    if is_finetuned:
        if dict_path:
            a = Dictionary.load(snake_case_ )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            a = target_dict.pad_index
            a = target_dict.bos_index
            a = target_dict.eos_index
            a = len(target_dict.symbols )
            a = os.path.join(snake_case_, '''vocab.json''' )
            if not os.path.isdir(snake_case_ ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case_ ) )
                return
            os.makedirs(snake_case_, exist_ok=snake_case_ )
            a = target_dict.indices

            # fairseq has the <pad> and <s> switched
            a = 0
            a = 1
            with open(snake_case_, '''w''', encoding='''utf-8''' ) as vocab_handle:
                json.dump(snake_case_, snake_case_ )
            a = WavaVecaCTCTokenizer(
                snake_case_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=snake_case_, )
            a = True if config.feat_extract_norm == '''layer''' else False
            a = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=1_6_0_0_0, padding_value=0, do_normalize=snake_case_, return_attention_mask=snake_case_, )
            a = WavaVecaProcessor(feature_extractor=snake_case_, tokenizer=snake_case_ )
            processor.save_pretrained(snake_case_ )

        a = WavaVecaConformerForCTC(snake_case_ )
    else:
        a = WavaVecaConformerForPreTraining(snake_case_ )

    if is_finetuned:
        a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        a = argparse.Namespace(task='''audio_pretraining''' )
        a = fairseq.tasks.setup_task(snake_case_ )
        a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=snake_case_ )

    a = model[0].eval()

    recursively_load_weights(snake_case_, snake_case_, not is_finetuned )

    hf_wavavec.save_pretrained(snake_case_ )


if __name__ == "__main__":
    UpperCamelCase__ : Any = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    UpperCamelCase__ : Tuple = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
356
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential


if __name__ == "__main__":
    UpperCamelCase__ : Optional[int] = pd.read_csv("""sample_data.csv""", header=None)
    UpperCamelCase__ : Tuple = df.shape[:1][0]
    # If you're using some other dataset input the target column
    UpperCamelCase__ : List[Any] = df.iloc[:, 1:2]
    UpperCamelCase__ : Union[str, Any] = actual_data.values.reshape(len_data, 1)
    UpperCamelCase__ : List[Any] = MinMaxScaler().fit_transform(actual_data)
    UpperCamelCase__ : Optional[Any] = 10
    UpperCamelCase__ : int = 5
    UpperCamelCase__ : List[str] = 20
    UpperCamelCase__ : Optional[int] = len_data - periods * look_back
    UpperCamelCase__ : Union[str, Any] = actual_data[:division]
    UpperCamelCase__ : str = actual_data[division - look_back :]
    UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = [], []
    UpperCamelCase__ , UpperCamelCase__ : str = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    UpperCamelCase__ : List[str] = np.array(train_x)
    UpperCamelCase__ : Optional[Any] = np.array(test_x)
    UpperCamelCase__ : Tuple = np.array([list(i.ravel()) for i in train_y])
    UpperCamelCase__ : Optional[Any] = np.array([list(i.ravel()) for i in test_y])

    UpperCamelCase__ : Union[str, Any] = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="""mean_squared_error""", optimizer="""adam""")
    UpperCamelCase__ : Tuple = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    UpperCamelCase__ : Tuple = model.predict(x_test)
330
0
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper

import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname

import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary

from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging


logging.set_verbosity_warning()

UpperCamelCase__ : Any = 2

# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
UpperCamelCase__ : Optional[Any] = {
    # fairseq:
    """wmt19-ru-en""": {"""length_penalty""": 1.1},
    """wmt19-en-ru""": {"""length_penalty""": 1.1_5},
    """wmt19-en-de""": {"""length_penalty""": 1.0},
    """wmt19-de-en""": {"""length_penalty""": 1.1},
    # allenai:
    """wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
    """wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
    """wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
    """wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
    """wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}

# this remaps the different models to their organization names
UpperCamelCase__ : Optional[int] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    UpperCamelCase__ : Optional[int] = """facebook"""
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    UpperCamelCase__ : List[Any] = """allenai"""


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Dict:
    """simple docstring"""
    a = dict((re.sub(r'''@@$''', '''''', snake_case_ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''', '''</w>''', snake_case_ ), v) for k, v in d.items() )
    a = '''<s> <pad> </s> <unk>'''.split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"""{k}</w>"""]
        a = d[k]  # restore
    return da


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> List[str]:
    """simple docstring"""
    assert os.path.exists(snake_case_ )
    os.makedirs(snake_case_, exist_ok=snake_case_ )
    print(f"""Writing results to {pytorch_dump_folder_path}""" )

    # handle various types of models
    a = basename(snake_case_ )
    a = dirname(snake_case_ )

    a = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    a = cls.hub_models()
    a = {'''bpe''': '''fastbpe''', '''tokenizer''': '''moses'''}
    a = '''.'''
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"""using checkpoint {checkpoint_file}""" )
    a = hub_utils.from_pretrained(
        snake_case_, snake_case_, snake_case_, archive_map=snake_case_, **snake_case_ )

    a = vars(chkpt['''args''']['''model'''] )

    a = args['''source_lang''']
    a = args['''target_lang''']

    a = dirname(snake_case_ )
    a = basename(snake_case_ )

    # dicts
    a = os.path.join(snake_case_, f"""dict.{src_lang}.txt""" )
    a = os.path.join(snake_case_, f"""dict.{tgt_lang}.txt""" )

    a = Dictionary.load(snake_case_ )
    a = rewrite_dict_keys(src_dict.indices )
    a = len(snake_case_ )
    a = os.path.join(snake_case_, '''vocab-src.json''' )
    print(f"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
    with open(snake_case_, '''w''', encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case_, ensure_ascii=snake_case_, indent=snake_case_ ) )

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    a = True
    for k in src_vocab.keys():
        if not k.islower():
            a = False
            break

    a = Dictionary.load(snake_case_ )
    a = rewrite_dict_keys(tgt_dict.indices )
    a = len(snake_case_ )
    a = os.path.join(snake_case_, '''vocab-tgt.json''' )
    print(f"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
    with open(snake_case_, '''w''', encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case_, ensure_ascii=snake_case_, indent=snake_case_ ) )

    # merges_file (bpecodes)
    a = os.path.join(snake_case_, VOCAB_FILES_NAMES['''merges_file'''] )
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        a = os.path.join(snake_case_, snake_case_ )
        if os.path.exists(snake_case_ ):
            break
    with open(snake_case_, encoding='''utf-8''' ) as fin:
        a = fin.read()
    a = re.sub(r''' \d+$''', '''''', snake_case_, 0, re.M )  # remove frequency number
    print(f"""Generating {merges_file}""" )
    with open(snake_case_, '''w''', encoding='''utf-8''' ) as fout:
        fout.write(snake_case_ )

    # model config
    a = os.path.join(snake_case_, '''config.json''' )

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"""need to extend tokenizer to support bpe={args["bpe"]}"""
    assert args["tokenizer"] == "moses", f"""need to extend tokenizer to support bpe={args["tokenizer"]}"""

    a = {
        '''architectures''': ['''FSMTForConditionalGeneration'''],
        '''model_type''': '''fsmt''',
        '''activation_dropout''': args['''activation_dropout'''],
        '''activation_function''': '''relu''',
        '''attention_dropout''': args['''attention_dropout'''],
        '''d_model''': args['''decoder_embed_dim'''],
        '''dropout''': args['''dropout'''],
        '''init_std''': 0.02,
        '''max_position_embeddings''': args['''max_source_positions'''],
        '''num_hidden_layers''': args['''encoder_layers'''],
        '''src_vocab_size''': src_vocab_size,
        '''tgt_vocab_size''': tgt_vocab_size,
        '''langs''': [src_lang, tgt_lang],
        '''encoder_attention_heads''': args['''encoder_attention_heads'''],
        '''encoder_ffn_dim''': args['''encoder_ffn_embed_dim'''],
        '''encoder_layerdrop''': args['''encoder_layerdrop'''],
        '''encoder_layers''': args['''encoder_layers'''],
        '''decoder_attention_heads''': args['''decoder_attention_heads'''],
        '''decoder_ffn_dim''': args['''decoder_ffn_embed_dim'''],
        '''decoder_layerdrop''': args['''decoder_layerdrop'''],
        '''decoder_layers''': args['''decoder_layers'''],
        '''bos_token_id''': 0,
        '''pad_token_id''': 1,
        '''eos_token_id''': 2,
        '''is_encoder_decoder''': True,
        '''scale_embedding''': not args['''no_scale_embedding'''],
        '''tie_word_embeddings''': args['''share_all_embeddings'''],
    }

    # good hparam defaults to start with
    a = 5
    a = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        a = best_score_hparams[model_dir]['''length_penalty''']
    else:
        a = 1.0
    print(f"""Generating {fsmt_model_config_file}""" )
    with open(snake_case_, '''w''', encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case_, ensure_ascii=snake_case_, indent=snake_case_ ) )

    # tokenizer config
    a = os.path.join(snake_case_, snake_case_ )
    a = {
        '''langs''': [src_lang, tgt_lang],
        '''model_max_length''': 1_0_2_4,
        '''do_lower_case''': do_lower_case,
    }
    print(f"""Generating {fsmt_tokenizer_config_file}""" )
    with open(snake_case_, '''w''', encoding='''utf-8''' ) as f:
        f.write(json.dumps(snake_case_, ensure_ascii=snake_case_, indent=snake_case_ ) )

    # model
    a = chkpt['''models'''][0]
    a = model.state_dict()

    # rename keys to start with 'model.'
    a = OrderedDict(('''model.''' + k, v) for k, v in model_state_dict.items() )

    # remove unneeded keys
    a = [
        '''model.model''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''model.encoder_embed_tokens.weight''',
        '''model.decoder_embed_tokens.weight''',
        '''model.encoder.embed_positions._float_tensor''',
        '''model.decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        model_state_dict.pop(snake_case_, snake_case_ )

    a = FSMTConfig.from_pretrained(snake_case_ )
    a = FSMTForConditionalGeneration(snake_case_ )

    # check that it loads ok
    model_new.load_state_dict(snake_case_, strict=snake_case_ )

    # save
    a = os.path.join(snake_case_, snake_case_ )
    print(f"""Generating {pytorch_weights_dump_path}""" )
    torch.save(snake_case_, snake_case_ )

    print('''Conversion is done!''' )
    print('''\nLast step is to upload the files to s3''' )
    print(f"""cd {data_root}""" )
    print(f"""transformers-cli upload {model_dir}""" )


if __name__ == "__main__":
    UpperCamelCase__ : Optional[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--fsmt_checkpoint_path""",
        default=None,
        type=str,
        required=True,
        help=(
            """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
            """ bpecodes, etc."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    UpperCamelCase__ : List[str] = parser.parse_args()
    convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
357
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Tuple:
    """simple docstring"""
    a = FileLock(str(tmpdir / '''foo.lock''' ) )
    a = FileLock(str(tmpdir / '''foo.lock''' ) )
    a = 0.01
    with locka.acquire():
        with pytest.raises(snake_case_ ):
            a = time.time()
            locka.acquire(snake_case_ )
            assert time.time() - _start > timeout


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Optional[int]:
    """simple docstring"""
    a = '''a''' * 1_0_0_0 + '''.lock'''
    a = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith('''.lock''' )
    assert not locka._lock_file.endswith(snake_case_ )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
    a = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(snake_case_ ):
            locka.acquire(0 )
330
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ : Any = logging.get_logger(__name__)

UpperCamelCase__ : int = {
    """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class lowerCamelCase_ ( a_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE_ = 'glpn'

    def __init__( self : int ,__lowerCamelCase : List[Any]=3 ,__lowerCamelCase : Tuple=4 ,__lowerCamelCase : Optional[int]=[2, 2, 2, 2] ,__lowerCamelCase : List[str]=[8, 4, 2, 1] ,__lowerCamelCase : Optional[int]=[32, 64, 1_60, 2_56] ,__lowerCamelCase : List[Any]=[7, 3, 3, 3] ,__lowerCamelCase : Dict=[4, 2, 2, 2] ,__lowerCamelCase : List[str]=[1, 2, 5, 8] ,__lowerCamelCase : int=[4, 4, 4, 4] ,__lowerCamelCase : Union[str, Any]="gelu" ,__lowerCamelCase : Any=0.0 ,__lowerCamelCase : str=0.0 ,__lowerCamelCase : str=0.02 ,__lowerCamelCase : Tuple=0.1 ,__lowerCamelCase : List[Any]=1e-6 ,__lowerCamelCase : Optional[int]=64 ,__lowerCamelCase : Optional[int]=10 ,__lowerCamelCase : Tuple=-1 ,**__lowerCamelCase : Optional[Any] ,):
        '''simple docstring'''
        super().__init__(**__lowerCamelCase )
        a = num_channels
        a = num_encoder_blocks
        a = depths
        a = sr_ratios
        a = hidden_sizes
        a = patch_sizes
        a = strides
        a = mlp_ratios
        a = num_attention_heads
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = initializer_range
        a = drop_path_rate
        a = layer_norm_eps
        a = decoder_hidden_size
        a = max_depth
        a = head_in_index
358
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ : Optional[int] = logging.get_logger(__name__)

UpperCamelCase__ : Dict = {
    """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class lowerCamelCase_ ( a_ ):
    SCREAMING_SNAKE_CASE_ = 'vit_mae'

    def __init__( self : Dict ,__lowerCamelCase : Any=7_68 ,__lowerCamelCase : Optional[Any]=12 ,__lowerCamelCase : List[str]=12 ,__lowerCamelCase : Optional[int]=30_72 ,__lowerCamelCase : int="gelu" ,__lowerCamelCase : Union[str, Any]=0.0 ,__lowerCamelCase : Optional[int]=0.0 ,__lowerCamelCase : Dict=0.02 ,__lowerCamelCase : List[Any]=1e-12 ,__lowerCamelCase : Dict=2_24 ,__lowerCamelCase : str=16 ,__lowerCamelCase : Union[str, Any]=3 ,__lowerCamelCase : Optional[Any]=True ,__lowerCamelCase : Dict=16 ,__lowerCamelCase : List[str]=5_12 ,__lowerCamelCase : int=8 ,__lowerCamelCase : int=20_48 ,__lowerCamelCase : Optional[Any]=0.75 ,__lowerCamelCase : int=False ,**__lowerCamelCase : Any ,):
        '''simple docstring'''
        super().__init__(**__lowerCamelCase )
        a = hidden_size
        a = num_hidden_layers
        a = num_attention_heads
        a = intermediate_size
        a = hidden_act
        a = hidden_dropout_prob
        a = attention_probs_dropout_prob
        a = initializer_range
        a = layer_norm_eps
        a = image_size
        a = patch_size
        a = num_channels
        a = qkv_bias
        a = decoder_num_attention_heads
        a = decoder_hidden_size
        a = decoder_num_hidden_layers
        a = decoder_intermediate_size
        a = mask_ratio
        a = norm_pix_loss
330
0
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[Any]:
    """simple docstring"""
    a = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
    a = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )

    if not os.path.isdir(snake_case_ ):
        os.makedirs(snake_case_ )

    a = model.state_dict()

    def to_tf_var_name(snake_case_ ):
        for patt, repl in iter(snake_case_ ):
            a = name.replace(snake_case_, snake_case_ )
        return f"""bert/{name}"""

    def create_tf_var(snake_case_, snake_case_, snake_case_ ):
        a = tf.dtypes.as_dtype(tensor.dtype )
        a = tf.get_variable(dtype=snake_case_, shape=tensor.shape, name=snake_case_, initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(snake_case_ )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            a = to_tf_var_name(snake_case_ )
            a = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                a = torch_tensor.T
            a = create_tf_var(tensor=snake_case_, name=snake_case_, session=snake_case_ )
            tf.keras.backend.set_value(snake_case_, snake_case_ )
            a = session.run(snake_case_ )
            print(f"""Successfully created {tf_name}: {np.allclose(snake_case_, snake_case_ )}""" )

        a = tf.train.Saver(tf.trainable_variables() )
        saver.save(snake_case_, os.path.join(snake_case_, model_name.replace('''-''', '''_''' ) + '''.ckpt''' ) )


def SCREAMING_SNAKE_CASE__ ( snake_case_=None ) -> Optional[int]:
    """simple docstring"""
    a = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=snake_case_, required=snake_case_, help='''model name e.g. bert-base-uncased''' )
    parser.add_argument(
        '''--cache_dir''', type=snake_case_, default=snake_case_, required=snake_case_, help='''Directory containing pytorch model'''
    )
    parser.add_argument('''--pytorch_model_path''', type=snake_case_, required=snake_case_, help='''/path/to/<pytorch-model-name>.bin''' )
    parser.add_argument('''--tf_cache_dir''', type=snake_case_, required=snake_case_, help='''Directory in which to save tensorflow model''' )
    a = parser.parse_args(snake_case_ )

    a = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name, state_dict=torch.load(args.pytorch_model_path ), cache_dir=args.cache_dir, )

    convert_pytorch_checkpoint_to_tf(model=snake_case_, ckpt_dir=args.tf_cache_dir, model_name=args.model_name )


if __name__ == "__main__":
    main()
359
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
    """simple docstring"""
    stooge(snake_case_, 0, len(snake_case_ ) - 1 )
    return arr


def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[Any]:
    """simple docstring"""
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        a , a = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        a = (int)((h - i + 1) / 3 )
        # Recursively sort first 2/3 elements
        stooge(snake_case_, snake_case_, (h - t) )
        # Recursively sort last 2/3 elements
        stooge(snake_case_, i + t, (snake_case_) )
        # Recursively sort first 2/3 elements
        stooge(snake_case_, snake_case_, (h - t) )


if __name__ == "__main__":
    UpperCamelCase__ : Dict = input("""Enter numbers separated by a comma:\n""").strip()
    UpperCamelCase__ : Optional[int] = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
330
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


UpperCamelCase__ : Any = {
    """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : str = ["""VisionEncoderDecoderModel"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : str = ["""TFVisionEncoderDecoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : List[str] = ["""FlaxVisionEncoderDecoderModel"""]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    UpperCamelCase__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
360
import json import os import re import unicodedata from json.encoder import INFINITY from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np import regex from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging from ...utils.generic import _is_jax, _is_numpy UpperCamelCase__ : Any = logging.get_logger(__name__) UpperCamelCase__ : Optional[Any] = { """artists_file""": """artists.json""", """lyrics_file""": """lyrics.json""", """genres_file""": """genres.json""", } UpperCamelCase__ : Union[str, Any] = { """artists_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""", }, """genres_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""", }, """lyrics_file""": { """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""", }, } UpperCamelCase__ : str = { """jukebox""": 512, } class lowerCamelCase_ ( a_ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_LYRIC_TOKENS_SIZES SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask'] def __init__( self : Optional[Any] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Tuple ,__lowerCamelCase : Union[str, Any]=["v3", "v2", "v2"] ,__lowerCamelCase : List[Any]=5_12 ,__lowerCamelCase : Tuple=5 ,__lowerCamelCase : List[Any]="<|endoftext|>" ,**__lowerCamelCase : List[str] ,): '''simple docstring''' a = AddedToken(__lowerCamelCase ,lstrip=__lowerCamelCase ,rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase ,__lowerCamelCase ) else unk_token super().__init__( unk_token=__lowerCamelCase ,n_genres=__lowerCamelCase ,version=__lowerCamelCase ,max_n_lyric_tokens=__lowerCamelCase ,**__lowerCamelCase ,) a = version a = max_n_lyric_tokens a = n_genres with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) with open(__lowerCamelCase ,encoding='''utf-8''' ) as vocab_handle: a = json.load(__lowerCamelCase ) a = r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters. 
        if len(self.lyrics_encoder ) == 79:
            a = oov.replace(r'''\-\'''' ,r'''\-+\'''' )

        a = regex.compile(__lowerCamelCase )
        a = {v: k for k, v in self.artists_encoder.items()}
        a = {v: k for k, v in self.genres_encoder.items()}
        a = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        '''simple docstring'''
        return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        '''simple docstring'''
        return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : List[Any] ):
        '''simple docstring'''
        a = [self.artists_encoder.get(__lowerCamelCase ,0 ) for artist in list_artists]
        for genres in range(len(__lowerCamelCase ) ):
            a = [self.genres_encoder.get(__lowerCamelCase ,0 ) for genre in list_genres[genres]]
            a = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))

        a = [[self.lyrics_encoder.get(__lowerCamelCase ,0 ) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ,__lowerCamelCase : List[str] ):
        '''simple docstring'''
        return list(__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : List[Any] ,__lowerCamelCase : Any ,__lowerCamelCase : Optional[int] ,**__lowerCamelCase : Optional[Any] ):
        '''simple docstring'''
        a , a , a = self.prepare_for_tokenization(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
        a = self._tokenize(__lowerCamelCase )
        return artist, genre, lyrics

    def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : str ,__lowerCamelCase : bool = False ):
        '''simple docstring'''
        for idx in range(len(self.version ) ):
            if self.version[idx] == "v3":
                a = artists[idx].lower()
                a = [genres[idx].lower()]
            else:
                a = self._normalize(artists[idx] ) + '''.v2'''
                a = [
                    self._normalize(__lowerCamelCase ) + '''.v2''' for genre in genres[idx].split('''_''' )
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+''' )
            a = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'''
            a = {vocab[index]: index + 1 for index in range(len(__lowerCamelCase ) )}
            a = 0
            a = len(__lowerCamelCase ) + 1
            a = self.vocab
            a = {v: k for k, v in self.vocab.items()}
            a = ''''''
        else:
            a = regex.compile(r'''[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+''' )

        a = self._run_strip_accents(__lowerCamelCase )
        a = lyrics.replace('''\\''' ,'''\n''' )
        a = self.out_of_vocab.sub('''''' ,__lowerCamelCase ), [], []
        return artists, genres, lyrics

    def SCREAMING_SNAKE_CASE_ ( self : str ,__lowerCamelCase : int ):
        '''simple docstring'''
        a = unicodedata.normalize('''NFD''' ,__lowerCamelCase )
        a = []
        for char in text:
            a = unicodedata.category(__lowerCamelCase )
            if cat == "Mn":
                continue
            output.append(__lowerCamelCase )
        return "".join(__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ):
        '''simple docstring'''
        a = (
            [chr(__lowerCamelCase ) for i in range(ord('''a''' ) ,ord('''z''' ) + 1 )]
            + [chr(__lowerCamelCase ) for i in range(ord('''A''' ) ,ord('''Z''' ) + 1 )]
            + [chr(__lowerCamelCase ) for i in range(ord('''0''' ) ,ord('''9''' ) + 1 )]
            + ['''.''']
        )
        a = frozenset(__lowerCamelCase )
        a = re.compile(r'''_+''' )
        a = ''''''.join([c if c in accepted else '''_''' for c in text.lower()] )
        a = pattern.sub('''_''' ,__lowerCamelCase ).strip('''_''' )
        return text

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ,__lowerCamelCase : List[str] ):
        '''simple docstring'''
        return " ".join(__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : str ,__lowerCamelCase : Optional[Union[str, TensorType]] = None ,__lowerCamelCase : bool = False ):
        '''simple docstring'''
        if not isinstance(__lowerCamelCase ,__lowerCamelCase ):
            a = TensorType(__lowerCamelCase )

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    '''Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.''' )
            import tensorflow as tf

            a = tf.constant
            a = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError('''Unable to convert output to PyTorch tensors format, PyTorch is not installed.''' )
            import torch

            a = torch.tensor
            a = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError('''Unable to convert output to JAX tensors format, JAX is not installed.''' )
            import jax.numpy as jnp  # noqa: F811

            a = jnp.array
            a = _is_jax
        else:
            a = np.asarray
            a = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                a = [inputs]
            if not is_tensor(__lowerCamelCase ):
                a = as_tensor(__lowerCamelCase )
        except:  # noqa E722
            raise ValueError(
                '''Unable to create tensor, you should probably activate truncation and/or padding '''
                '''with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.''' )

        return inputs

    def __call__( self : Tuple ,__lowerCamelCase : Tuple ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : List[str]="" ,__lowerCamelCase : List[Any]="pt" ):
        '''simple docstring'''
        a = [0, 0, 0]
        a = [artist] * len(self.version )
        a = [genres] * len(self.version )
        a , a , a = self.tokenize(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
        a , a , a = self._convert_token_to_id(__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
        a = [-INFINITY] * len(full_tokens[-1] )
        a = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=__lowerCamelCase )
            for i in range(len(self.version ) )
        ]
        return BatchEncoding({'''input_ids''': input_ids, '''attention_masks''': attention_masks} )

    def SCREAMING_SNAKE_CASE_ ( self : int ,__lowerCamelCase : str ,__lowerCamelCase : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(__lowerCamelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return

        a = os.path.join(
            __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''artists_file'''] )
        with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.artists_encoder ,ensure_ascii=__lowerCamelCase ) )

        a = os.path.join(
            __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''genres_file'''] )
        with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.genres_encoder ,ensure_ascii=__lowerCamelCase ) )

        a = os.path.join(
            __lowerCamelCase ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''lyrics_file'''] )
        with open(__lowerCamelCase ,'''w''' ,encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=__lowerCamelCase ) )

        return (artists_file, genres_file, lyrics_file)

    def SCREAMING_SNAKE_CASE_ ( self : Any ,__lowerCamelCase : Any ,__lowerCamelCase : Any ,__lowerCamelCase : str ):
        '''simple docstring'''
        a = self.artists_decoder.get(__lowerCamelCase )
        a = [self.genres_decoder.get(__lowerCamelCase ) for genre in genres_index]
        a = [self.lyrics_decoder.get(__lowerCamelCase ) for character in lyric_index]
        return artist, genres, lyrics
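# --- Hedged usage sketch (not part of the original file) ---
# A minimal example of driving the tokenizer above end to end, assuming it is the
# JukeboxTokenizer shipped with transformers and that the __call__ signature shown
# above (artist, genres, lyrics, return_tensors) is intact. The checkpoint name is a
# real published one, but the sample inputs are illustrative assumptions.
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")  # artist, genres, lyrics
print(len(encoding["input_ids"]))  # one tensor of ids per prior version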
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


UpperCamelCase__ : str = {
    """configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
    """tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : Optional[Any] = [
        """TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """AdaptiveEmbedding""",
        """TransfoXLForSequenceClassification""",
        """TransfoXLLMHeadModel""",
        """TransfoXLModel""",
        """TransfoXLPreTrainedModel""",
        """load_tf_weights_in_transfo_xl""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase__ : str = [
        """TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFAdaptiveEmbedding""",
        """TFTransfoXLForSequenceClassification""",
        """TFTransfoXLLMHeadModel""",
        """TFTransfoXLMainLayer""",
        """TFTransfoXLModel""",
        """TFTransfoXLPreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    UpperCamelCase__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
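# --- Hedged illustration (not part of the original file) ---
# A small sketch of the lazy-import pattern the module above relies on: _LazyModule only
# materializes a submodule the first time one of its attributes is accessed, so importing
# the package itself stays cheap even when the torch/tensorflow backends are heavy.
# The attribute name comes from the import structure above; the no-argument constructor
# call is an assumption based on configs having defaults.
import importlib

transfo_xl = importlib.import_module("transformers.models.transfo_xl")
config = transfo_xl.TransfoXLConfig()  # first access resolves configuration_transfo_xl
print(type(config).__name__)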
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

from pathlib import Path
import json
import tempfile

from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES


UpperCamelCase__ : Optional[Any] = """tiny-wmt19-en-ru"""

# Build
# borrowed from a test
UpperCamelCase__ : Any = [
    """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""",
    """w</w>""", """r</w>""", """t</w>""", """lo""", """low""", """er</w>""", """low</w>""",
    """lowest</w>""", """newer</w>""", """wider</w>""", """<unk>""",
]
UpperCamelCase__ : List[Any] = dict(zip(vocab, range(len(vocab))))
UpperCamelCase__ : Any = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]

with tempfile.TemporaryDirectory() as tmpdirname:
    UpperCamelCase__ : Optional[Any] = Path(tmpdirname)
    UpperCamelCase__ : Tuple = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
    UpperCamelCase__ : int = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
    UpperCamelCase__ : Union[str, Any] = build_dir / VOCAB_FILES_NAMES["""merges_file"""]

    with open(src_vocab_file, """w""") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, """w""") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, """w""") as fp:
        fp.write("""\n""".join(merges))

    UpperCamelCase__ : Dict = FSMTTokenizer(
        langs=["""en""", """ru"""],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

UpperCamelCase__ : Union[str, Any] = FSMTConfig(
    langs=["""ru""", """en"""],
    src_vocab_size=1_000,
    tgt_vocab_size=1_000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

UpperCamelCase__ : Union[str, Any] = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")

# Test
UpperCamelCase__ : List[str] = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
UpperCamelCase__ : Tuple = tiny_model(**batch)

print("""test output:""", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
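# --- Hedged round-trip check (not part of the original script) ---
# A sketch verifying the artifacts the script just wrote; it assumes the script above ran
# to completion so that mname_tiny holds a saved tokenizer and model. Since the model was
# saved in half precision, it is cast back to fp32 here for a CPU forward pass.
loaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
loaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny).float()
reload_batch = loaded_tokenizer(["Making tiny model"], return_tensors="pt")
print("reloaded logits shape:", loaded_model(**reload_batch).logits.shape)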
def SCREAMING_SNAKE_CASE__ ( ) -> list[list[int]]:
    """simple docstring"""
    return [list(range(1_0_0_0 - i, -1_0_0_0 - i, -1 ) ) for i in range(1_0_0_0 )]


UpperCamelCase__ : Optional[int] = generate_large_matrix()
UpperCamelCase__ : List[str] = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> None:
    """simple docstring"""
    assert all(row == sorted(snake_case_, reverse=snake_case_ ) for row in grid )
    assert all(list(snake_case_ ) == sorted(snake_case_, reverse=snake_case_ ) for col in zip(*snake_case_ ) )


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
    """simple docstring"""
    a = 0
    a = len(snake_case_ ) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        a = (left + right) // 2
        a = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            a = mid + 1
        else:
            a = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(snake_case_ )


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
    """simple docstring"""
    a = 0
    a = len(grid[0] )

    for i in range(len(snake_case_ ) ):
        a = find_negative_index(grid[i][:bound] )
        total += bound
    return (len(snake_case_ ) * len(grid[0] )) - total


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
    """simple docstring"""
    return len([number for row in grid for number in row if number < 0] )


def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> int:
    """simple docstring"""
    a = 0
    for row in grid:
        for i, number in enumerate(snake_case_ ):
            if number < 0:
                total += len(snake_case_ ) - i
                break
    return total


def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """simple docstring"""
    from timeit import timeit

    print('''Running benchmarks''' )
    a = (
        '''from __main__ import count_negatives_binary_search, '''
        '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid'''
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        a = timeit(f"""{func}(grid=grid)""", setup=snake_case_, number=5_0_0 )
        print(f"""{func}() took {time:0.4f} seconds""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
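# --- Hedged sanity check (not part of the original file) ---
# A quick worked example on the first of the small grids above. In a grid sorted
# decreasingly along both rows and columns, all three counting strategies must agree:
# here the rows contribute 1 + 1 + 2 + 4 = 8 negatives. The readable function names are
# the ones referenced in the benchmark setup string; in this obfuscated copy every def
# is literally named SCREAMING_SNAKE_CASE__, so treat these calls as illustrative.
sample = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_binary_search(sample) == 8
assert count_negatives_brute_force(sample) == 8
assert count_negatives_brute_force_with_break(sample) == 8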
import inspect
import os

import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed

import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
    AccelerateTestCase,
    TempDirTestCase,
    execute_subprocess_async,
    require_cuda,
    require_fsdp,
    require_multi_gpu,
    slow,
)
from accelerate.utils.constants import (
    FSDP_AUTO_WRAP_POLICY,
    FSDP_BACKWARD_PREFETCH,
    FSDP_SHARDING_STRATEGY,
    FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment


set_seed(42)

UpperCamelCase__ : Optional[Any] = """bert-base-cased"""
UpperCamelCase__ : int = """fp16"""
UpperCamelCase__ : str = """bf16"""
UpperCamelCase__ : List[Any] = [FPaa, BFaa]


@require_fsdp
@require_cuda
class lowerCamelCase_ ( a_ ):
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        '''simple docstring'''
        super().setUp()

        a = dict(
            ACCELERATE_USE_FSDP='''true''' ,
            MASTER_ADDR='''localhost''' ,
            MASTER_PORT='''10999''' ,
            RANK='''0''' ,
            LOCAL_RANK='''0''' ,
            WORLD_SIZE='''1''' ,
        )

    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(__lowerCamelCase ):
            a = self.dist_env.copy()
            a = F"""{i + 1}"""
            a = strategy
            with mockenv_context(**__lowerCamelCase ):
                a = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy ,ShardingStrategy(i + 1 ) )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(__lowerCamelCase ):
            a = self.dist_env.copy()
            a = prefetch_policy
            with mockenv_context(**__lowerCamelCase ):
                a = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch ,BackwardPrefetch(i + 1 ) )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(__lowerCamelCase ):
            a = self.dist_env.copy()
            a = state_dict_type
            with mockenv_context(**__lowerCamelCase ):
                a = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type ,StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )

    def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
        '''simple docstring'''
        a = AutoModel.from_pretrained(__lowerCamelCase )
        for policy in FSDP_AUTO_WRAP_POLICY:
            a = self.dist_env.copy()
            a = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                a = '''BertLayer'''
            elif policy == "SIZE_BASED_WRAP":
                a = '''2000'''
            with mockenv_context(**__lowerCamelCase ):
                a = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )

        a = self.dist_env.copy()
        a = '''TRANSFORMER_BASED_WRAP'''
        a = '''T5Layer'''
        with mockenv_context(**__lowerCamelCase ):
            a = FullyShardedDataParallelPlugin()
            with self.assertRaises(__lowerCamelCase ) as cm:
                fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase )
            self.assertTrue('''Could not find the transformer layer class to wrap in the model.''' in str(cm.exception ) )

        a = self.dist_env.copy()
        a = '''SIZE_BASED_WRAP'''
        a = '''0'''
        with mockenv_context(**__lowerCamelCase ):
            a = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            a = self.dist_env.copy()
            a = mp_dtype
            with mockenv_context(**__lowerCamelCase ):
                a = Accelerator()
                if mp_dtype == "fp16":
                    a = torch.floataa
                elif mp_dtype == "bf16":
                    a = torch.bfloataa
                a = MixedPrecision(param_dtype=__lowerCamelCase ,reduce_dtype=__lowerCamelCase ,buffer_dtype=__lowerCamelCase )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy ,__lowerCamelCase )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler ,__lowerCamelCase ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(__lowerCamelCase )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        '''simple docstring'''
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            a = self.dist_env.copy()
            a = str(__lowerCamelCase ).lower()
            with mockenv_context(**__lowerCamelCase ):
                a = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload ,CPUOffload(offload_params=__lowerCamelCase ) )


@require_fsdp
@require_multi_gpu
@slow
class lowerCamelCase_ ( a_ ):
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        '''simple docstring'''
        super().setUp()
        a = 0.82
        a = [
            '''fsdp_shard_grad_op_transformer_based_wrap''',
            '''fsdp_full_shard_transformer_based_wrap''',
        ]
        a = {
            '''multi_gpu_fp16''': 32_00,
            '''fsdp_shard_grad_op_transformer_based_wrap_fp16''': 20_00,
            '''fsdp_full_shard_transformer_based_wrap_fp16''': 19_00,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        a = 1_60
        a = 1_60

        a = inspect.getfile(accelerate.test_utils )
        a = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps'''] )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
        '''simple docstring'''
        a = os.path.join(self.test_scripts_folder ,'''test_performance.py''' )
        a = ['''accelerate''', '''launch''', '''--num_processes=2''', '''--num_machines=1''', '''--machine_rank=0''', '''--use_fsdp''']
        for config in self.performance_configs:
            a = cmd.copy()
            for i, strategy in enumerate(__lowerCamelCase ):
                if strategy.lower() in config:
                    cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
                    break

            if "fp32" in config:
                cmd_config.append('''--mixed_precision=no''' )
            else:
                cmd_config.append('''--mixed_precision=fp16''' )

            if "cpu_offload" in config:
                cmd_config.append('''--fsdp_offload_params=True''' )

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append('''--fsdp_min_num_params=2000''' )

            cmd_config.extend(
                [
                    self.test_file_path,
                    F"""--output_dir={self.tmpdir}""",
                    F"""--performance_lower_bound={self.performance_lower_bound}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        '''simple docstring'''
        a = os.path.join(self.test_scripts_folder ,'''test_checkpointing.py''' )
        a = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
            '''--use_fsdp''',
            '''--mixed_precision=fp16''',
            '''--fsdp_transformer_layer_cls_to_wrap=BertLayer''',
        ]

        for i, strategy in enumerate(__lowerCamelCase ):
            a = cmd.copy()
            cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
            if strategy != "FULL_SHARD":
                continue
            a = len(__lowerCamelCase )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                a = cmd_config[:state_dict_config_index]
                cmd_config.append(F"""--fsdp_state_dict_type={state_dict_type}""" )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        F"""--output_dir={self.tmpdir}""",
                        '''--partial_train_epoch=1''',
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )

                a = cmd_config[:-1]
                a = os.path.join(self.tmpdir ,'''epoch_0''' )
                cmd_config.extend(
                    [
                        F"""--resume_from_checkpoint={resume_from_checkpoint}""",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )

    def SCREAMING_SNAKE_CASE_ ( self : int ):
        '''simple docstring'''
        a = os.path.join(self.test_scripts_folder ,'''test_peak_memory_usage.py''' )
        a = [
            '''accelerate''',
            '''launch''',
            '''--num_processes=2''',
            '''--num_machines=1''',
            '''--machine_rank=0''',
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            a = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(['''--mixed_precision=fp16'''] )
            else:
                cmd_config.extend(['''--mixed_precision=no'''] )

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(['''--use_fsdp'''] )
                for i, strategy in enumerate(__lowerCamelCase ):
                    if strategy.lower() in spec:
                        cmd_config.append(F"""--fsdp_sharding_strategy={i+1}""" )
                        break

                if "cpu_offload" in spec:
                    cmd_config.append('''--fsdp_offload_params=True''' )

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(F"""--fsdp_auto_wrap_policy={policy}""" )
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append('''--fsdp_transformer_layer_cls_to_wrap=BertLayer''' )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append('''--fsdp_min_num_params=2000''' )

            cmd_config.extend(
                [
                    self.test_file_path,
                    F"""--output_dir={self.tmpdir}""",
                    F"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
                    F"""--n_train={self.n_train}""",
                    F"""--n_val={self.n_val}""",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__lowerCamelCase ,env=os.environ.copy() )
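# --- Hedged reference command (not part of the original file) ---
# For orientation, one concrete command of the kind the tests above assemble piecewise;
# every flag below appears verbatim in the test bodies, while the script path and output
# directory are hypothetical placeholders.
example_cmd = [
    "accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0",
    "--use_fsdp",
    "--mixed_precision=fp16",
    "--fsdp_sharding_strategy=1",  # 1 == FULL_SHARD
    "--fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP",
    "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
    "/path/to/test_performance.py",
    "--output_dir=/tmp/fsdp_output",
]
# execute_subprocess_async(example_cmd, env=os.environ.copy())  # as the tests do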