import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel

from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
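For reference, a minimal sketch of driving the converter above programmatically instead of through the CLI; all three paths are placeholders, not values from the source.

# Assumed usage of convert_tf_checkpoint_to_pytorch; the paths below are
# hypothetical placeholders and must point at real files before running.
convert_tf_checkpoint_to_pytorch(
    "/path/to/lxmert/model.ckpt",  # placeholder TF checkpoint prefix
    "/path/to/lxmert/config.json",  # placeholder config file
    "/path/to/output/pytorch_model.bin",  # placeholder output path
)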
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
#!/usr/bin/env python

# Usage:
# ./gen-card-allenai-wmt16.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.

For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).

All 3 models are available:

* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)


## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias


## Training data

Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).

## Eval results

Here are the BLEU scores:

model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}

The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```

## Data Sources

- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)


### BibTeX entry and citation info

```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```

"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
def print_max_activities(start: list[int], finish: list[int]):
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
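As a quick illustration of the greedy rule implemented above, here is a small standalone walk-through on the same sample data; the `selected` list is illustrative and not part of the original file.

# Standalone sketch of the same greedy choice: pick activity j whenever its
# start time is at or after the finish time of the last activity picked.
start = [1, 3, 0, 5, 8, 5]
finish = [2, 4, 6, 7, 9, 9]
selected = [0]  # the first activity is always selected
for j in range(1, len(finish)):
    if start[j] >= finish[selected[-1]]:
        selected.append(j)
print(selected)  # [0, 1, 3, 4]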
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate


TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)
parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters
parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())


def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time


# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)


def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples


eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)


def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")

# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor


if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment %d", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
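A quick round-trip sanity check for the two helpers above, assumed to run in the same module:

# Round-trip check: "Hello World!" encodes to its uppercase hex digits and back.
encoded = base16_encode(b"Hello World!")
assert encoded == "48656C6C6F20576F726C6421"
assert base16_decode(encoded) == b"Hello World!"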
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
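A small sanity check of the indexing convention used above, assumed to run in the same module: under this scheme the first Fibonacci number with three digits sits at index 12.

# fibonacci(12) == 144 is the first three-digit term, so solution(3) == 12.
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12
assert solution(3) == 12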
import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="swin-base-simmim-window6-192",
        type=str,
        choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
        help="Name of the Swin SimMIM model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
from ..utils import DummyObject, requires_backends


class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
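The same signature-bucketing idea, shown self-contained on a tiny hypothetical word list so it does not depend on words.txt:

import collections

# Words sharing a sorted-letter signature are anagrams of each other.
demo_words = ["silent", "listen", "enlist", "google"]
buckets = collections.defaultdict(list)
for w in demo_words:
    buckets["".join(sorted(w))].append(w)
print(buckets["eilnst"])  # ['silent', 'listen', 'enlist']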
import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any]=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[str]=0 ): __magic_name__ = [] for old_item in old_list: __magic_name__ = old_item.replace('''in_layers.0''' , '''norm1''' ) __magic_name__ = new_item.replace('''in_layers.2''' , '''conv1''' ) __magic_name__ = new_item.replace('''out_layers.0''' , '''norm2''' ) __magic_name__ = new_item.replace('''out_layers.3''' , '''conv2''' ) __magic_name__ = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) __magic_name__ = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) __magic_name__ = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int=0 ): __magic_name__ = [] for old_item in old_list: __magic_name__ = old_item __magic_name__ = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) __magic_name__ = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) __magic_name__ = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) __magic_name__ = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) __magic_name__ = shave_segments(snake_case_ , n_shave_prefix_segments=snake_case_ ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : List[str]=None , snake_case_ : int=None , snake_case_ : Any=None ): assert isinstance(snake_case_ , snake_case_ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __magic_name__ = old_checkpoint[path] __magic_name__ = old_tensor.shape[0] // 3 __magic_name__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __magic_name__ = old_tensor.shape[0] // config['''num_head_channels'''] // 3 __magic_name__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __magic_name__ , __magic_name__ , __magic_name__ = old_tensor.split(channels // num_heads , dim=1 ) __magic_name__ = query.reshape(snake_case_ ) __magic_name__ = key.reshape(snake_case_ ) __magic_name__ = value.reshape(snake_case_ ) for path in paths: __magic_name__ = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __magic_name__ = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) __magic_name__ = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) __magic_name__ = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: __magic_name__ = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __magic_name__ = old_checkpoint[path['''old''']][:, :, 0] else: __magic_name__ = old_checkpoint[path['''old''']] def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] ): __magic_name__ = {} __magic_name__ = checkpoint['''time_embed.0.weight'''] __magic_name__ = checkpoint['''time_embed.0.bias'''] __magic_name__ = checkpoint['''time_embed.2.weight'''] __magic_name__ = checkpoint['''time_embed.2.bias'''] __magic_name__ = checkpoint['''input_blocks.0.0.weight'''] __magic_name__ = checkpoint['''input_blocks.0.0.bias'''] __magic_name__ = checkpoint['''out.0.weight'''] __magic_name__ = checkpoint['''out.0.bias'''] __magic_name__ = checkpoint['''out.2.weight'''] __magic_name__ = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only __magic_name__ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) __magic_name__ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the middle blocks only __magic_name__ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) __magic_name__ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(snake_case_ ) } # Retrieves the keys for the output blocks only __magic_name__ = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) __magic_name__ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(snake_case_ ) } for i in range(1 , snake_case_ ): __magic_name__ = (i - 1) // (config['''num_res_blocks'''] + 1) __magic_name__ = (i - 1) % (config['''num_res_blocks'''] + 1) __magic_name__ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] __magic_name__ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: __magic_name__ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] __magic_name__ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue 
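        # For every other input block, rename its resnet weights (and, when the block
        # has one, its attention weights) into the diffusers layout; fused qkv tensors
        # are split into separate query/key/value entries by assign_to_checkpoint.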
__magic_name__ = renew_resnet_paths(snake_case_ ) __magic_name__ = {'''old''': f'input_blocks.{i}.0', '''new''': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} __magic_name__ = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path, resnet_op] , config=snake_case_ ) if len(snake_case_ ): __magic_name__ = renew_attention_paths(snake_case_ ) __magic_name__ = { '''old''': f'input_blocks.{i}.1', '''new''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } __magic_name__ = { f'input_blocks.{i}.1.qkv.bias': { '''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { '''key''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=snake_case_ , config=snake_case_ , ) __magic_name__ = middle_blocks[0] __magic_name__ = middle_blocks[1] __magic_name__ = middle_blocks[2] __magic_name__ = renew_resnet_paths(snake_case_ ) assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ ) __magic_name__ = renew_resnet_paths(snake_case_ ) assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , config=snake_case_ ) __magic_name__ = renew_attention_paths(snake_case_ ) __magic_name__ = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( snake_case_ , snake_case_ , snake_case_ , attention_paths_to_split=snake_case_ , config=snake_case_ ) for i in range(snake_case_ ): __magic_name__ = i // (config['''num_res_blocks'''] + 1) __magic_name__ = i % (config['''num_res_blocks'''] + 1) __magic_name__ = [shave_segments(snake_case_ , 2 ) for name in output_blocks[i]] __magic_name__ = {} for layer in output_block_layers: __magic_name__ , __magic_name__ = layer.split('''.''' )[0], shave_segments(snake_case_ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case_ ) else: __magic_name__ = [layer_name] if len(snake_case_ ) > 1: __magic_name__ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] __magic_name__ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] __magic_name__ = renew_resnet_paths(snake_case_ ) __magic_name__ = renew_resnet_paths(snake_case_ ) __magic_name__ = {'''old''': f'output_blocks.{i}.0', '''new''': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , config=snake_case_ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __magic_name__ = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) __magic_name__ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] 
__magic_name__ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(snake_case_ ) == 2: __magic_name__ = [] if len(snake_case_ ): __magic_name__ = renew_attention_paths(snake_case_ ) __magic_name__ = { '''old''': f'output_blocks.{i}.1', '''new''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } __magic_name__ = { f'output_blocks.{i}.1.qkv.bias': { '''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { '''key''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case_ , snake_case_ , snake_case_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=snake_case_ , ) else: __magic_name__ = renew_resnet_paths(snake_case_ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __magic_name__ = '''.'''.join(['''output_blocks''', str(snake_case_ ), path['''old''']] ) __magic_name__ = '''.'''.join(['''up_blocks''', str(snake_case_ ), '''resnets''', str(snake_case_ ), path['''new''']] ) __magic_name__ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') a_ = parser.parse_args() a_ = torch.load(args.checkpoint_path) with open(args.config_file) as f: a_ = json.loads(f.read()) a_ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] a_ = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: a_ = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) a_ = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) a_ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
from __future__ import annotations from scipy.special import comb # type: ignore class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , A ) -> Tuple: '''simple docstring''' __magic_name__ = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. __magic_name__ = len(A ) - 1 def __A ( self , A ) -> list[float]: '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __magic_name__ = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , A ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(A ) , 5 ) == 1 return output_values def __A ( self , A ) -> tuple[float, float]: '''simple docstring''' assert 0 <= t <= 1, "Time t must be between 0 and 1." __magic_name__ = self.basis_function(A ) __magic_name__ = 0.0 __magic_name__ = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def __A ( self , A = 0.01 ) -> Tuple: '''simple docstring''' from matplotlib import pyplot as plt # type: ignore __magic_name__ = [] # x coordinates of points to plot __magic_name__ = [] # y coordinates of points to plot __magic_name__ = 0.0 while t <= 1: __magic_name__ = self.bezier_curve_function(A ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size __magic_name__ = [i[0] for i in self.list_of_points] __magic_name__ = [i[1] for i in self.list_of_points] plt.plot( A , A , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , ) plt.scatter(A , A , color='''red''' , label='''Control Points''' ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer a_ : Union[str, Any] = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast a_ : Optional[Any] = TaTokenizerFast a_ : int = {'configuration_mt5': ['MT5Config', 'MT5OnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = [ 'MT5EncoderModel', 'MT5ForConditionalGeneration', 'MT5ForQuestionAnswering', 'MT5Model', 'MT5PreTrainedModel', 'MT5Stack', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = ['TFMT5EncoderModel', 'TFMT5ForConditionalGeneration', 'TFMT5Model'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[Any] = ['FlaxMT5EncoderModel', 'FlaxMT5ForConditionalGeneration', 'FlaxMT5Model'] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys a_ : str = _LazyModule( __name__, globals()['__file__'], _import_structure, extra_objects={'MT5Tokenizer': MTaTokenizer, 'MT5TokenizerFast': MTaTokenizerFast}, module_spec=__spec__, )
import re def _SCREAMING_SNAKE_CASE ( snake_case_ : str ): __magic_name__ = re.compile( r'''^(?:0|94|\+94|0{2}94)''' r'''7(0|1|2|4|5|6|7|8)''' r'''(-| |)''' r'''\d{7}$''' ) return bool(re.search(snake_case_ , snake_case_ ) ) if __name__ == "__main__": a_ : Optional[int] = '0094702343221' print(is_sri_lankan_phone_number(phone))
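# A few hand-checked inputs for the pattern above (shown only as illustration):
#
#   '0771234567'    -> matches  (local 0 prefix, carrier digit 7)
#   '+94751234567'  -> matches  (international +94 prefix)
#   '075-1234567'   -> matches  (optional '-' separator before the last 7 digits)
#   '0731234567'    -> no match (3 is not one of the accepted carrier digits)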
from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class SCREAMING_SNAKE_CASE_ :
    """simple docstring"""

    def __init__( self , A , A=13 , A=30 , A=2 , A=3 , A=True , A=True , A=32 , A=2 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=10 , A=0.02 , A=3 , A=None , A=2 , ) -> Tuple:
        '''simple docstring'''
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = image_size
        __magic_name__ = patch_size
        __magic_name__ = num_channels
        __magic_name__ = is_training
        __magic_name__ = use_labels
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = scope
        __magic_name__ = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        __magic_name__ = (image_size // patch_size) ** 2
        __magic_name__ = num_patches + 2

    def __A ( self ) -> List[str]:
        '''simple docstring'''
        __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        __magic_name__ = self.get_config()

        return config, pixel_values, labels

    def __A ( self ) -> Union[str, Any]:
        '''simple docstring'''
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def __A ( self , A , A , A ) -> Optional[int]:
        '''simple docstring'''
        __magic_name__ = TFDeiTModel(config=A )
        __magic_name__ = model(A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __A ( self , A , A , A ) -> str:
        '''simple docstring'''
        __magic_name__ = TFDeiTForMaskedImageModeling(config=A )
        __magic_name__ = model(A )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        __magic_name__ = 1
        __magic_name__ = TFDeiTForMaskedImageModeling(A )

        __magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __magic_name__ = model(A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __A ( self , A , A , A ) -> Tuple: '''simple docstring''' __magic_name__ = self.type_sequence_label_size __magic_name__ = TFDeiTForImageClassification(A ) __magic_name__ = model(A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images __magic_name__ = 1 __magic_name__ = TFDeiTForImageClassification(A ) __magic_name__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __magic_name__ = model(A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _a = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) _a = ( { """feature-extraction""": TFDeiTModel, """image-classification""": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) _a = False _a = False _a = False _a = False def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = TFDeiTModelTester(self ) __magic_name__ = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=37 ) def __A ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __A ( self ) -> Optional[int]: '''simple docstring''' pass def __A ( self ) -> Optional[Any]: '''simple docstring''' __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) __magic_name__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A , tf.keras.layers.Dense ) ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(A ) __magic_name__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A ) def __A ( self ) -> int: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*A ) def __A ( self ) -> List[Any]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def __A ( self , A , A , A=False ) -> Dict: '''simple docstring''' __magic_name__ = super()._prepare_for_class(A , A , return_labels=A ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call 
).parameters: del inputs_dict["labels"] return inputs_dict @slow def __A ( self ) -> Tuple: '''simple docstring''' for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = TFDeiTModel.from_pretrained(A ) self.assertIsNotNone(A ) def _SCREAMING_SNAKE_CASE ( ): __magic_name__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @cached_property def __A ( self ) -> Dict: '''simple docstring''' return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) __magic_name__ = self.default_image_processor __magic_name__ = prepare_img() __magic_name__ = image_processor(images=A , return_tensors='''tf''' ) # forward pass __magic_name__ = model(**A ) # verify the logits __magic_name__ = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , A ) __magic_name__ = tf.constant([-1.02_66, 0.19_12, -1.28_61] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
import os import sys import unittest a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers') class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = find_backend(''' if not is_torch_available():''' ) self.assertEqual(A , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(A , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __magic_name__ = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(A , '''torch_and_transformers_and_onnx''' ) def __A ( self ) -> str: '''simple docstring''' __magic_name__ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , A ) self.assertIn('''torch_and_transformers''' , A ) self.assertIn('''flax_and_transformers''' , A ) self.assertIn('''torch_and_transformers_and_onnx''' , A ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(A , '''\nCONSTANT = None\n''' ) __magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __magic_name__ = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' __magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(A , A ) def __A ( self ) -> int: '''simple docstring''' __magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' __magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , A )
def _SCREAMING_SNAKE_CASE ( snake_case_ : int ): __magic_name__ = 1 for i in range(1 , num + 1 ): fact *= i return fact def _SCREAMING_SNAKE_CASE ( snake_case_ : int ): __magic_name__ = 0 while number > 0: __magic_name__ = number % 10 sum_of_digits += last_digit __magic_name__ = number // 10 # Removing the last_digit from the given number return sum_of_digits def _SCREAMING_SNAKE_CASE ( snake_case_ : int = 100 ): __magic_name__ = factorial(snake_case_ ) __magic_name__ = split_and_add(snake_case_ ) return result if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
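# Worked example of the helpers above: factorial(10) == 3628800, and
# split_and_add(3628800) peels off the digits 0+0+8+8+2+6+3 == 27, so
# solution(10) returns 27. The default of 100 reproduces Project Euler 20.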
def _SCREAMING_SNAKE_CASE ( snake_case_ : list[list[int]] , snake_case_ : int , snake_case_ : int , snake_case_ : set ): __magic_name__ , __magic_name__ = len(snake_case_ ), len(grid[0] ) if ( min(snake_case_ , snake_case_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) __magic_name__ = 0 count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ ) count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ ) count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ ) count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
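# Small worked example for depth_first_search: on the grid
#
#   [[0, 0, 0],
#    [1, 1, 0],
#    [0, 0, 0]]
#
# the wall on row 1 forces every route through column 2, so the only simple
# path from (0, 0) to (2, 2) is right, right, down, down, and
# depth_first_search(grid, 0, 0, set()) returns 1.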
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" _a = MODEL_FOR_CAUSAL_LM_MAPPING _a = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output __magic_name__ = text_generator('''This is a test''' , do_sample=A ) self.assertEqual( A , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) __magic_name__ = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( A , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) __magic_name__ = text_generator('''This is a test''' , do_sample=A , num_return_sequences=2 , return_tensors=A ) self.assertEqual( A , [ {'''generated_token_ids''': ANY(A )}, {'''generated_token_ids''': ANY(A )}, ] , ) __magic_name__ = text_generator.model.config.eos_token_id __magic_name__ = '''<pad>''' __magic_name__ = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=A , num_return_sequences=2 , batch_size=2 , return_tensors=A , ) self.assertEqual( A , [ [ {'''generated_token_ids''': ANY(A )}, {'''generated_token_ids''': ANY(A )}, ], [ {'''generated_token_ids''': ANY(A )}, {'''generated_token_ids''': ANY(A )}, ], ] , ) @require_tf def __A ( self ) -> int: '''simple docstring''' __magic_name__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output __magic_name__ = text_generator('''This is a test''' , do_sample=A ) self.assertEqual( A , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) __magic_name__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=A ) self.assertEqual( A , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __A ( self , A , A , A ) -> Dict: '''simple docstring''' __magic_name__ = TextGenerationPipeline(model=A , tokenizer=A ) return text_generator, ["This is a test", "Another test"] def __A ( self ) -> Optional[Any]: '''simple docstring''' __magic_name__ = '''Hello I believe in''' __magic_name__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) __magic_name__ = text_generator(A ) self.assertEqual( A , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , 
        )
        __magic_name__ = text_generator(A , stop_sequence=''' fe''' )
        self.assertEqual(A , [{'''generated_text''': '''Hello I believe in fe'''}] )

    def __A ( self , A , A ) -> Tuple:
        '''simple docstring'''
        __magic_name__ = text_generator.model
        __magic_name__ = text_generator.tokenizer

        __magic_name__ = text_generator('''This is a test''' )
        self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )

        __magic_name__ = text_generator('''This is a test''' , return_full_text=A )
        self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )

        __magic_name__ = pipeline(task='''text-generation''' , model=A , tokenizer=A , return_full_text=A )
        __magic_name__ = text_generator('''This is a test''' )
        self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
        self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )

        __magic_name__ = text_generator('''This is a test''' , return_full_text=A )
        self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
        self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )

        __magic_name__ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=A )
        self.assertEqual(
            A , [
                [{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
                [{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
            ] , )

        if text_generator.tokenizer.pad_token is not None:
            __magic_name__ = text_generator(
                ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=A )
            self.assertEqual(
                A , [
                    [{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
                    [{'''generated_text''': ANY(A )}, {'''generated_text''': ANY(A )}],
                ] , )

        with self.assertRaises(A ):
            __magic_name__ = text_generator('''test''' , return_full_text=A , return_text=A )
        with self.assertRaises(A ):
            __magic_name__ = text_generator('''test''' , return_full_text=A , return_tensors=A )
        with self.assertRaises(A ):
            __magic_name__ = text_generator('''test''' , return_text=A , return_tensors=A )

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            __magic_name__ = text_generator('''''' )
            self.assertEqual(A , [{'''generated_text''': ANY(A )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                __magic_name__ = text_generator('''''' )

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        __magic_name__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
        if (
            tokenizer.model_max_length < 1_00_00
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )

            __magic_name__ = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(A ):
                text_generator(
                    '''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __A ( self ) -> Any:
        '''simple docstring'''
        import torch

        # Classic `model_kwargs`
        __magic_name__ = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __magic_name__ = pipe('''This is a test''' )
        self.assertEqual(
            A , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        __magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        __magic_name__ = pipe('''This is a test''' )
        self.assertEqual(
            A , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        __magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        __magic_name__ = pipe('''This is a test''' )
        self.assertEqual(
            A , [
                {
                    '''generated_text''': (
                        '''This is a test test test test test test test test test test test test test test test test'''
                        ''' test'''
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def __A ( self ) -> Tuple:
        '''simple docstring'''
        import torch

        __magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
        pipe('''This is a test''' )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def __A ( self ) -> Optional[int]:
        '''simple docstring'''
        import torch

        __magic_name__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
        pipe('''This is a test''' , do_sample=A , top_p=0.5 )

    def __A ( self ) -> Dict:
        '''simple docstring'''
        __magic_name__ = '''Hello world'''
        __magic_name__ = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            __magic_name__ = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            __magic_name__ = logging.get_logger('''transformers.generation.utils''' )
        __magic_name__ = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(A ) as cl:
            __magic_name__ = text_generator(A , max_length=10 , max_new_tokens=1 )
        self.assertIn(A , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(A ) as cl:
            __magic_name__ = text_generator(A , max_new_tokens=1 )
        self.assertNotIn(A , cl.out )

        with CaptureLogger(A ) as cl:
            __magic_name__ = text_generator(A , max_length=10 )
        self.assertNotIn(A , cl.out )
a_ : Dict = { 'meter': 'm', 'kilometer': 'km', 'megametre': 'Mm', 'gigametre': 'Gm', 'terametre': 'Tm', 'petametre': 'Pm', 'exametre': 'Em', 'zettametre': 'Zm', 'yottametre': 'Ym', } # Exponent of the factor(meter) a_ : str = { 'm': 0, 'km': 3, 'Mm': 6, 'Gm': 9, 'Tm': 12, 'Pm': 15, 'Em': 18, 'Zm': 21, 'Ym': 24, } def _SCREAMING_SNAKE_CASE ( snake_case_ : float , snake_case_ : str , snake_case_ : str ): __magic_name__ = from_type.lower().strip('''s''' ) __magic_name__ = to_type.lower().strip('''s''' ) __magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ ) __magic_name__ = UNIT_SYMBOL.get(snake_case_ , snake_case_ ) if from_sanitized not in METRIC_CONVERSION: __magic_name__ = ( f'Invalid \'from_type\' value: {from_type!r}.\n' f'Conversion abbreviations are: {", ".join(snake_case_ )}' ) raise ValueError(snake_case_ ) if to_sanitized not in METRIC_CONVERSION: __magic_name__ = ( f'Invalid \'to_type\' value: {to_type!r}.\n' f'Conversion abbreviations are: {", ".join(snake_case_ )}' ) raise ValueError(snake_case_ ) __magic_name__ = METRIC_CONVERSION[from_sanitized] __magic_name__ = METRIC_CONVERSION[to_sanitized] __magic_name__ = 1 if from_exponent > to_exponent: __magic_name__ = from_exponent - to_exponent else: __magic_name__ = -(to_exponent - from_exponent) return value * pow(10 , snake_case_ ) if __name__ == "__main__": from doctest import testmod testmod()
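# Example of the exponent arithmetic above: converting 5 kilometers to meters
# looks up exponents 3 (km) and 0 (m) and scales by 10 ** (3 - 0), i.e.
# 5 * 1000 == 5000. (The converter is the function defined above; its original
# name is not preserved in this dump.)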
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ : Optional[int] = 16 a_ : int = 32 def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ): __magic_name__ = AutoTokenizer.from_pretrained(snake_case_ ) __magic_name__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __magic_name__ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __magic_name__ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __magic_name__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ): model.eval() __magic_name__ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __magic_name__ , __magic_name__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] __magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __magic_name__ = metric.compute() return eval_metric["accuracy"] def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ): # Initialize accelerator __magic_name__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config['''lr'''] __magic_name__ = int(config['''num_epochs'''] ) __magic_name__ = int(config['''seed'''] ) __magic_name__ = int(config['''batch_size'''] ) __magic_name__ = args.model_name_or_path set_seed(snake_case_ ) __magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __magic_name__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __magic_name__ = 1 __magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __magic_name__ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
    __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
        snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )

    # We need to keep track of how many total steps we have iterated over
    __magic_name__ = 0
    # We also need to keep track of the starting epoch so files are named properly
    __magic_name__ = 0
    __magic_name__ = evaluate.load('''glue''' , '''mrpc''' )
    __magic_name__ = num_epochs
    if args.partial_train_epoch is not None:
        __magic_name__ = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        __magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1]
        __magic_name__ = ''''''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        __magic_name__ = int(snake_case_ ) + 1
        __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        accelerator.print('''resumed checkpoint performance:''' , snake_case_ )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
        with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
            __magic_name__ = json.load(snake_case_ )
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    __magic_name__ = {}
    for epoch in range(snake_case_ , snake_case_ ):
        model.train()
        for step, batch in enumerate(snake_case_ ):
            __magic_name__ = model(**snake_case_ )
            __magic_name__ = outputs.loss
            __magic_name__ = loss / gradient_accumulation_steps
            accelerator.backward(snake_case_ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        __magic_name__ = f'epoch_{epoch}'
        __magic_name__ = os.path.join(args.output_dir , snake_case_ )
        accelerator.save_state(snake_case_ )
        __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
        __magic_name__ = accuracy
        __magic_name__ = lr_scheduler.get_lr()[0]
        __magic_name__ = optimizer.param_groups[0]['''lr''']
        __magic_name__ = epoch
        __magic_name__ = overall_step
        accelerator.print(f'epoch {epoch}:' , snake_case_ )
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
                json.dump(snake_case_ , snake_case_ )


def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , )
    parser.add_argument(
        '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
    parser.add_argument(
        '''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , )
    parser.add_argument(
        '''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , )
    parser.add_argument(
        '''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , )
    __magic_name__ = parser.parse_args()
    __magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(snake_case_ , snake_case_ )


if __name__ == "__main__":
    main()
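# This example script is normally driven by the accelerate launcher; a typical
# run (the filename and flag values are illustrative, not prescribed here) is:
#
#   accelerate launch checkpointing.py --model_name_or_path bert-base-cased --num_epochs 2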
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a_ : Union[str, Any] = { 'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : int = [ 'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'LongT5EncoderModel', 'LongT5ForConditionalGeneration', 'LongT5Model', 'LongT5PreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Dict = [ 'FlaxLongT5ForConditionalGeneration', 'FlaxLongT5Model', 'FlaxLongT5PreTrainedModel', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys a_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse a_ : Optional[Any] = 'docs/source/_static/js/custom.js' def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ): with open(snake_case_ , encoding='''utf-8''' , newline='''\n''' ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 # First let's put the right version while not lines[index].startswith('''const stableVersion =''' ): index += 1 __magic_name__ = f'const stableVersion = "v{version}"\n' # Then update the dictionary while not lines[index].startswith('''const versionMapping = {''' ): index += 1 # We go until the end while not lines[index].startswith('''}''' ): index += 1 # We add the new version at the end lines[index - 1] += f' "v{version}": "v{version}",\n' with open(snake_case_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case_ ) if __name__ == "__main__": a_ : Optional[int] = argparse.ArgumentParser() parser.add_argument('--version', help='Release version.') a_ : List[str] = parser.parse_args() update_custom_js(args.version)
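# Sketch of how the updater above is used during a release (the script filename
# is an assumption; only --version is required):
#
#   python update_custom_js.py --version 4.31.0
#
# which rewrites `const stableVersion` and appends a "v4.31.0": "v4.31.0" entry
# to versionMapping in docs/source/_static/js/custom.js.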
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str: '''simple docstring''' __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope __magic_name__ = self.vocab_size - 1 def __A ( self ) -> str: '''simple docstring''' __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self , A , A , A , A , *A ) -> Tuple: '''simple docstring''' __magic_name__ = OpenAIGPTModel(config=A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , head_mask=A ) __magic_name__ = model(A , token_type_ids=A ) __magic_name__ = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , A , A , A , A , *A ) -> Dict: '''simple docstring''' __magic_name__ = OpenAIGPTLMHeadModel(A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , A , A , A , A , *A ) -> List[Any]: '''simple docstring''' __magic_name__ = OpenAIGPTDoubleHeadsModel(A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape 
, () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , A , A , A , A , *A ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.num_labels __magic_name__ = OpenAIGPTForSequenceClassification(A ) model.to(A ) model.eval() __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _a = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _a = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _a = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __A ( self , A , A , A , A , A ) -> List[str]: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def __A ( self , A , A , A=False ) -> List[str]: '''simple docstring''' __magic_name__ = super()._prepare_for_class(A , A , return_labels=A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __magic_name__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , ) __magic_name__ = inputs_dict['''labels'''] __magic_name__ = inputs_dict['''labels'''] __magic_name__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , ) __magic_name__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) return inputs_dict def __A ( self ) -> str: '''simple docstring''' __magic_name__ = OpenAIGPTModelTester(self ) __magic_name__ = ConfigTester(self , config_class=A , n_embd=37 ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*A ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A ) def __A ( self ) -> List[str]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*A ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A ) @slow def __A ( self ) -> List[str]: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = OpenAIGPTModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @slow def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(A ) __magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is __magic_name__ = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the __magic_name__ = model.generate(A , do_sample=A ) self.assertListEqual(output_ids[0].tolist() , A )
678
0
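A minimal standalone sketch of the integration check in the test suite above, assuming transformers and torch are installed and the public openai-gpt checkpoint is reachable; the prompt token ids and the greedy-decoding setup are taken directly from the test.

import torch
from transformers import OpenAIGPTLMHeadModel

model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.eval()
input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long)  # "the president is"
output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(output_ids[0].tolist())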
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : List[Any] ):
    # Initialise PyTorch model
    __magic_name__ = BigBirdConfig.from_json_file(snake_case_ )
    print(f'Building PyTorch model from configuration: {config}' )

    if is_trivia_qa:
        __magic_name__ = BigBirdForQuestionAnswering(snake_case_ )
    else:
        __magic_name__ = BigBirdForPreTraining(snake_case_ )

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(snake_case_ , snake_case_ , is_trivia_qa=snake_case_ )

    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(snake_case_ )


if __name__ == "__main__":
    a_ : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--big_bird_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained BERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_trivia_qa', action='store_true', help='Whether to convert a model with a trivia_qa head.'
    )
    a_ : Dict = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
711
def _SCREAMING_SNAKE_CASE ( ):
    __magic_name__ = []
    __magic_name__ = 1
    while len(snake_case_ ) < 1E6:
        constant.append(str(snake_case_ ) )
        i += 1
    __magic_name__ = ''''''.join(snake_case_ )

    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[99] )
        * int(constant[999] )
        * int(constant[9999] )
        * int(constant[9_9999] )
        * int(constant[99_9999] )
    )


if __name__ == "__main__":
    print(solution())
678
0
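For reference, a hedged sketch of driving the BigBird conversion script above from Python; the script filename and every path here are hypothetical assumptions, while the flags mirror the argparse definitions in the sample (the optional --is_trivia_qa flag is omitted for the pretraining head).

import subprocess

subprocess.run(
    [
        "python",
        "convert_bigbird_original_tf_checkpoint_to_pytorch.py",  # hypothetical filename
        "--tf_checkpoint_path", "/path/to/bigbird/model.ckpt",   # hypothetical path
        "--big_bird_config_file", "/path/to/big_bird_config.json",
        "--pytorch_dump_path", "/path/to/pytorch_dump",
    ],
    check=True,
)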
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


a_ : List[Any] = False


class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """simple docstring"""

    pass


@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """simple docstring"""

    def __A ( self ) -> Union[str, Any]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __A ( self ) -> Union[str, Any]:
        '''simple docstring'''
        __magic_name__ = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )

        __magic_name__ = '''A painting of a squirrel eating a burger '''
        __magic_name__ = torch.manual_seed(0 )
        __magic_name__ = pipe(
            prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy'''
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(A )
            __magic_name__ = VersatileDiffusionTextToImagePipeline.from_pretrained(A )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )

        __magic_name__ = generator.manual_seed(0 )
        __magic_name__ = pipe(
            prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy'''
        ).images

        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    def __A ( self ) -> Dict:
        '''simple docstring'''
        __magic_name__ = VersatileDiffusionTextToImagePipeline.from_pretrained(
            '''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa
        )
        pipe.to(A )
        pipe.set_progress_bar_config(disable=A )

        __magic_name__ = '''A painting of a squirrel eating a burger '''
        __magic_name__ = torch.manual_seed(0 )
        __magic_name__ = pipe(
            prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy'''
        ).images

        __magic_name__ = image[0, 2_53:2_56, 2_53:2_56, -1]

        assert image.shape == (1, 5_12, 5_12, 3)
        __magic_name__ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
712
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch a_ : str = True except ImportError: a_ : Optional[int] = False try: from torch.hub import _get_torch_home a_ : Optional[Any] = _get_torch_home() except ImportError: a_ : List[Any] = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) a_ : Any = os.path.join(torch_cache_home, 'transformers') a_ : Any = 'https://cdn.huggingface.co' a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) a_ : Any = os.path.join(PATH, 'config.yaml') a_ : Any = os.path.join(PATH, 'attributes.txt') a_ : Any = os.path.join(PATH, 'objects.txt') a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) a_ : int = 'pytorch_model.bin' a_ : Union[str, Any] = 'config.yaml' def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ): __magic_name__ = [] with open(snake_case_ ) as f: for object in f.readlines(): vg_classes.append(object.split(''',''' )[0].lower().strip() ) __magic_name__ = [] with open(snake_case_ ) as f: for object in f.readlines(): vg_attrs.append(object.split(''',''' )[0].lower().strip() ) return vg_classes, vg_attrs def _SCREAMING_SNAKE_CASE ( snake_case_ : int ): __magic_name__ = OrderedDict() with open(snake_case_ , '''rb''' ) as f: __magic_name__ = pkl.load(snake_case_ )['''model'''] for k in copy.deepcopy(list(ckp.keys() ) ): __magic_name__ = ckp.pop(snake_case_ ) if isinstance(snake_case_ , np.ndarray ): __magic_name__ = torch.tensor(snake_case_ ) else: assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ ) __magic_name__ = v return r class SCREAMING_SNAKE_CASE_ : """simple docstring""" _a = {} def __init__( self , A , A = "root" , A=0 ) -> List[str]: '''simple docstring''' __magic_name__ = name __magic_name__ = level __magic_name__ = {} for k, v in dictionary.items(): if v is None: raise ValueError() __magic_name__ = copy.deepcopy(A ) __magic_name__ = copy.deepcopy(A ) if isinstance(A , A ): __magic_name__ = Config(A , name=A , level=level + 1 ) __magic_name__ = v setattr(self , A , A ) __magic_name__ = d def __repr__( self ) -> Union[str, Any]: '''simple docstring''' return str(list((self._pointer.keys()) ) ) def __setattr__( self , A , A ) -> Tuple: '''simple docstring''' __magic_name__ = val __magic_name__ = val __magic_name__ = key.split('''.''' ) __magic_name__ = len(A ) - 1 __magic_name__ = self._pointer if len(A ) > 1: for i, l in enumerate(A ): if hasattr(self , A ) and isinstance(getattr(self , A ) , A ): setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A ) if l == last_level: __magic_name__ = val else: __magic_name__ = pointer[l] def __A ( self ) -> List[Any]: '''simple docstring''' return self._pointer def __A ( self , A , A ) -> Any: '''simple 
docstring''' with open(F'{file_name}' , '''w''' ) as stream: dump(A , A ) def __A ( self , A , A ) -> List[Any]: '''simple docstring''' with open(F'{file_name}' , '''w''' ) as stream: json.dump(A , A ) @staticmethod def __A ( A ) -> Optional[Any]: '''simple docstring''' with open(A ) as stream: __magic_name__ = load(A , Loader=A ) return data def __str__( self ) -> List[Any]: '''simple docstring''' __magic_name__ = ''' ''' if self._name != "root": __magic_name__ = F'{t * (self._level-1)}{self._name}:\n' else: __magic_name__ = '''''' __magic_name__ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(A , A ): r += F'{t * (self._level)}{v}\n' self._level += 1 else: r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n' __magic_name__ = level return r[:-1] @classmethod def __A ( cls , A , **A ) -> int: '''simple docstring''' __magic_name__ , __magic_name__ = cls.get_config_dict(A , **A ) return cls(A ) @classmethod def __A ( cls , A , **A ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = kwargs.pop('''cache_dir''' , A ) __magic_name__ = kwargs.pop('''force_download''' , A ) __magic_name__ = kwargs.pop('''resume_download''' , A ) __magic_name__ = kwargs.pop('''proxies''' , A ) __magic_name__ = kwargs.pop('''local_files_only''' , A ) if os.path.isdir(A ): __magic_name__ = os.path.join(A , A ) elif os.path.isfile(A ) or is_remote_url(A ): __magic_name__ = pretrained_model_name_or_path else: __magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A ) try: # Load from URL or cache if already cached __magic_name__ = cached_path( A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , ) # Load config dict if resolved_config_file is None: raise EnvironmentError __magic_name__ = Config.load_yaml(A ) except EnvironmentError: __magic_name__ = '''Can\'t load config for''' raise EnvironmentError(A ) if resolved_config_file == config_file: print('''loading configuration file from path''' ) else: print('''loading configuration file cache''' ) return Config.load_yaml(A ), kwargs def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ): __magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device ) __magic_name__ = in_tensor.numpy() __magic_name__ = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), ( f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %' " element-wise mismatch" ) raise Exception('''tensors are all good''' ) # Hugging face functions below def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): __magic_name__ = urlparse(snake_case_ ) return parsed.scheme in ("http", "https") def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ): __magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX __magic_name__ = '''/''' not in model_id if legacy_format: return f'{endpoint}/{model_id}-{filename}' else: return f'{endpoint}/{model_id}/{filename}' def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ): __magic_name__ = '''python/{}'''.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(snake_case_ , snake_case_ ): ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() ) 
elif isinstance(snake_case_ , snake_case_ ): ua += "; " + user_agent __magic_name__ = {'''user-agent''': ua} if resume_size > 0: __magic_name__ = '''bytes=%d-''' % (resume_size,) __magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ ) if response.status_code == 416: # Range not satisfiable return __magic_name__ = response.headers.get('''Content-Length''' ) __magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None __magic_name__ = tqdm( unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(snake_case_ ) ) temp_file.write(snake_case_ ) progress.close() def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ): if cache_dir is None: __magic_name__ = TRANSFORMERS_CACHE if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) __magic_name__ = None if not local_files_only: try: __magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ ) if response.status_code == 200: __magic_name__ = response.headers.get('''ETag''' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass __magic_name__ = url_to_filename(snake_case_ , snake_case_ ) # get cache path to put the file __magic_name__ = os.path.join(snake_case_ , snake_case_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(snake_case_ ): return cache_path else: __magic_name__ = [ file for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' ) if not file.endswith('''.json''' ) and not file.endswith('''.lock''' ) ] if len(snake_case_ ) > 0: return os.path.join(snake_case_ , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( '''Cannot find the requested files in the cached path and outgoing traffic has been''' ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\'''' ''' to False.''' ) return None # From now on, etag is not None. if os.path.exists(snake_case_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. __magic_name__ = cache_path + '''.lock''' with FileLock(snake_case_ ): # If the download just completed while the lock was activated. if os.path.exists(snake_case_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: __magic_name__ = cache_path + '''.incomplete''' @contextmanager def _resumable_file_manager(): with open(snake_case_ , '''a+b''' ) as f: yield f __magic_name__ = _resumable_file_manager if os.path.exists(snake_case_ ): __magic_name__ = os.stat(snake_case_ ).st_size else: __magic_name__ = 0 else: __magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ ) __magic_name__ = 0 # Download to temporary file, then copy to cache dir once finished. 
# Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , ) http_get( snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , ) os.replace(temp_file.name , snake_case_ ) __magic_name__ = {'''url''': url, '''etag''': etag} __magic_name__ = cache_path + '''.json''' with open(snake_case_ , '''w''' ) as meta_file: json.dump(snake_case_ , snake_case_ ) return cache_path def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ): __magic_name__ = url.encode('''utf-8''' ) __magic_name__ = shaaaa(snake_case_ ) __magic_name__ = url_hash.hexdigest() if etag: __magic_name__ = etag.encode('''utf-8''' ) __magic_name__ = shaaaa(snake_case_ ) filename += "." + etag_hash.hexdigest() if url.endswith('''.h5''' ): filename += ".h5" return filename def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ): if cache_dir is None: __magic_name__ = TRANSFORMERS_CACHE if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) if is_remote_url(snake_case_ ): # URL, so get it from the cache (downloading if necessary) __magic_name__ = get_from_cache( snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , ) elif os.path.exists(snake_case_ ): # File, and it exists. __magic_name__ = url_or_filename elif urlparse(snake_case_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('''file {} not found'''.format(snake_case_ ) ) else: # Something unknown raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) ) if extract_compressed_file: if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" __magic_name__ , __magic_name__ = os.path.split(snake_case_ ) __magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted''' __magic_name__ = os.path.join(snake_case_ , snake_case_ ) if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions __magic_name__ = output_path + '''.lock''' with FileLock(snake_case_ ): shutil.rmtree(snake_case_ , ignore_errors=snake_case_ ) os.makedirs(snake_case_ ) if is_zipfile(snake_case_ ): with ZipFile(snake_case_ , '''r''' ) as zip_file: zip_file.extractall(snake_case_ ) zip_file.close() elif tarfile.is_tarfile(snake_case_ ): __magic_name__ = tarfile.open(snake_case_ ) tar_file.extractall(snake_case_ ) tar_file.close() else: raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) ) return output_path_extracted return output_path def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ): assert isinstance(snake_case_ , snake_case_ ) if os.path.isfile(snake_case_ ): with open(snake_case_ ) as f: __magic_name__ = eval(f.read() ) else: __magic_name__ = requests.get(snake_case_ ) try: __magic_name__ = requests.json() except Exception: __magic_name__ = req.content.decode() assert data is not None, "could not connect" try: __magic_name__ = eval(snake_case_ ) except Exception: __magic_name__ = data.split('''\n''' ) req.close() return data def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ): __magic_name__ = requests.get(snake_case_ ) __magic_name__ = np.array(Image.open(BytesIO(response.content ) ) ) return img def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ): __magic_name__ = url.split('''/''' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(snake_case_ ) with open(snake_case_ , '''rb''' ) as stream: __magic_name__ = pkl.load(snake_case_ ) __magic_name__ = weights.pop('''model''' ) __magic_name__ = {} for k, v in model.items(): __magic_name__ = torch.from_numpy(snake_case_ ) if "running_var" in k: __magic_name__ = torch.tensor([0] ) __magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' ) __magic_name__ = zero return new def _SCREAMING_SNAKE_CASE ( ): print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' ) def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ): assert isinstance(snake_case_ , snake_case_ ) if os.path.isfile(snake_case_ ): __magic_name__ = cva.imread(snake_case_ ) else: __magic_name__ = get_image_from_url(snake_case_ ) assert img is not None, f'could not connect to: {im}' __magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB ) if input_format == "RGB": __magic_name__ = img[:, :, ::-1] return img def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ): return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
678
0
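A small illustrative sketch of the nested configuration wrapper defined in the utilities above (shown there under the obfuscated name SCREAMING_SNAKE_CASE_, Config in the original repository). This reflects the documented behavior of the original class, so treat the call and the attribute access as assumptions rather than verified API.

# Assumes the Config class from the utilities above is importable under its
# original name; nested dicts are wrapped recursively into attribute access.
cfg = Config({"model": {"hidden_size": 768}, "name": "demo"})
print(cfg.model.hidden_size)  # nested dict value -> 768
print(cfg.name)               # plain values become attributes -> "demo"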
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpta import TFGPTaTokenizer


a_ : Tuple = ['gpt2']
a_ : int = 'gpt2'

if is_tf_available():

    class SCREAMING_SNAKE_CASE_ ( tf.Module ):
        """simple docstring"""

        def __init__( self , A ) -> Tuple:
            '''simple docstring'''
            super().__init__()
            __magic_name__ = tokenizer
            __magic_name__ = AutoConfig.from_pretrained(A )
            __magic_name__ = TFGPTaLMHeadModel.from_config(A )

        @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
        def __A ( self , A ) -> Tuple:
            '''simple docstring'''
            __magic_name__ = self.tokenizer(A )
            __magic_name__ = tokenized['''input_ids'''].to_tensor()

            __magic_name__ = tf.cast(input_ids_dense > 0 , tf.intaa )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            __magic_name__ = self.model(input_ids=A , attention_mask=A )['''logits''']

            return outputs


@require_tf
@require_keras_nlp
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
    """simple docstring"""

    def __A ( self ) -> Dict:
        '''simple docstring'''
        super().setUp()

        __magic_name__ = [GPTaTokenizer.from_pretrained(A ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        __magic_name__ = [TFGPTaTokenizer.from_pretrained(A ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )

        __magic_name__ = [
            '''This is a straightforward English test sentence.''',
            '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
            '''Now we\'re going to add some Chinese: 一 二 三 一二三''',
            '''And some much more rare Chinese: 齉 堃 齉堃''',
            '''Je vais aussi écrire en français pour tester les accents''',
            '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
        ]
        __magic_name__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )

    def __A ( self ) -> Union[str, Any]:
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                __magic_name__ = tokenizer([test_inputs] , return_tensors='''tf''' )
                __magic_name__ = tf_tokenizer([test_inputs] )

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    __magic_name__ = python_outputs[key].numpy()
                    __magic_name__ = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(A , tf.intaa ) == tf_outputs_values ) )

    @slow
    def __A ( self ) -> str:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            __magic_name__ = tf.function(A )
            for test_inputs in self.test_sentences:
                __magic_name__ = tf.constant(A )
                __magic_name__ = compiled_tokenizer(A )
                __magic_name__ = tf_tokenizer(A )

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def __A ( self ) -> List[str]:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            __magic_name__ = ModelToSave(tokenizer=A )
            __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] )
            __magic_name__ = model.serving(A )  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                __magic_name__ = Path(A ) / '''saved.model'''
                tf.saved_model.save(A , A , signatures={'''serving_default''': model.serving} )
                __magic_name__ = tf.saved_model.load(A )
                __magic_name__ = loaded_model.signatures['''serving_default'''](A )['''output_0''']
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def __A ( self ) -> Optional[int]:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] )
            __magic_name__ = tf_tokenizer(A )  # Build model with some sample inputs

            __magic_name__ = tf_tokenizer.get_config()
            __magic_name__ = TFGPTaTokenizer.from_config(A )
            __magic_name__ = model_from_config(A )

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def __A ( self ) -> Tuple:
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            __magic_name__ = 12_31_23

            for max_length in [3, 5, 10_24]:
                __magic_name__ = tf.convert_to_tensor([self.test_sentences[0]] )
                __magic_name__ = tf_tokenizer(A , max_length=A )

                __magic_name__ = out['''input_ids'''].numpy().shape[1]

                assert out_length == max_length
713
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ : Optional[int] = 16 a_ : int = 32 def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ): __magic_name__ = AutoTokenizer.from_pretrained(snake_case_ ) __magic_name__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __magic_name__ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __magic_name__ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __magic_name__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ): model.eval() __magic_name__ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __magic_name__ , __magic_name__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] __magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __magic_name__ = metric.compute() return eval_metric["accuracy"] def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ): # Initialize accelerator __magic_name__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config['''lr'''] __magic_name__ = int(config['''num_epochs'''] ) __magic_name__ = int(config['''seed'''] ) __magic_name__ = int(config['''batch_size'''] ) __magic_name__ = args.model_name_or_path set_seed(snake_case_ ) __magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __magic_name__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __magic_name__ = 1 __magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __magic_name__ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over __magic_name__ = 0 # We also need to keep track of the stating epoch so files are named properly __magic_name__ = 0 __magic_name__ = evaluate.load('''glue''' , '''mrpc''' ) __magic_name__ = num_epochs if args.partial_train_epoch is not None: __magic_name__ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1] __magic_name__ = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __magic_name__ = int(snake_case_ ) + 1 __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.print('''resumed checkpoint performance:''' , snake_case_ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f: __magic_name__ = json.load(snake_case_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __magic_name__ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.loss __magic_name__ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __magic_name__ = f'epoch_{epoch}' __magic_name__ = os.path.join(args.output_dir , snake_case_ ) accelerator.save_state(snake_case_ ) __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __magic_name__ = accuracy __magic_name__ = lr_scheduler.get_lr()[0] __magic_name__ = optimizer.param_groups[0]['''lr'''] __magic_name__ = epoch __magic_name__ = overall_step accelerator.print(f'epoch {epoch}:' , snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def _SCREAMING_SNAKE_CASE ( ): __magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , ) __magic_name__ = parser.parse_args() __magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
678
0
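The tokenizer test file above exercises a TensorFlow-native GPT-2 tokenizer; here is a brief usage sketch, assuming tensorflow and keras-nlp are installed. The class is TFGPT2Tokenizer in the released library, rendered TFGPTaTokenizer in the sample above.

import tensorflow as tf
from transformers import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
out = tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(out["input_ids"])  # token ids (possibly ragged), usable inside a tf.function graph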
import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __A ( self ) -> int: '''simple docstring''' __magic_name__ = tempfile.mkdtemp() __magic_name__ = 5 # Realm tok __magic_name__ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __magic_name__ = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(A , exist_ok=A ) __magic_name__ = os.path.join(A , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __magic_name__ = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(A , exist_ok=A ) def __A ( self ) -> RealmTokenizer: '''simple docstring''' return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def __A ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> List[Any]: '''simple docstring''' __magic_name__ = RealmConfig(num_block_records=self.num_block_records ) return config def __A ( self ) -> Optional[Any]: '''simple docstring''' __magic_name__ = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def __A ( self ) -> str: '''simple docstring''' __magic_name__ = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=A , ) return block_records def __A ( self ) -> str: '''simple docstring''' __magic_name__ = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def __A ( self ) -> str: '''simple docstring''' __magic_name__ = self.get_config() __magic_name__ = self.get_dummy_retriever() __magic_name__ = retriever.tokenizer __magic_name__ = np.array([0, 3] , dtype='''long''' ) __magic_name__ = tokenizer(['''Test question'''] ).input_ids __magic_name__ = tokenizer( ['''the fourth'''] , add_special_tokens=A , return_token_type_ids=A , return_attention_mask=A , ).input_ids __magic_name__ = config.reader_seq_len __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = retriever( A , A , answer_ids=A , max_length=A , return_tensors='''np''' ) self.assertEqual(len(A ) , 2 ) self.assertEqual(len(A ) , 2 ) self.assertEqual(len(A ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', 
'''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def __A ( self ) -> List[str]: '''simple docstring''' __magic_name__ = self.get_config() __magic_name__ = self.get_dummy_retriever() __magic_name__ = retriever.tokenizer __magic_name__ = np.array([0, 3, 5] , dtype='''long''' ) __magic_name__ = tokenizer(['''Test question'''] ).input_ids __magic_name__ = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=A , return_token_type_ids=A , return_attention_mask=A , ).input_ids __magic_name__ = config.reader_seq_len __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = retriever( A , A , answer_ids=A , max_length=A , return_tensors='''np''' ) self.assertEqual([False, True, True] , A ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A ) def __A ( self ) -> str: '''simple docstring''' __magic_name__ = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path __magic_name__ = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: __magic_name__ = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) __magic_name__ = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
714
def _SCREAMING_SNAKE_CASE ( snake_case_ : str ):
    return " ".join(
        ''''''.join(word[::-1] ) if len(snake_case_ ) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
678
0
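A worked example for the word-reversal helper above, assuming the function is bound to the name reverse_long_words that its own __main__ block calls: words longer than four characters are reversed, shorter ones pass through unchanged.

print(reverse_long_words("Hey wollef sroirraw"))  # -> "Hey fellow warriors"
print(reverse_long_words("to be or not"))         # all words <= 4 chars -> unchanged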
import pytest


a_ : Dict = '__dummy_dataset1__'

a_ : Union[str, Any] = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n "tokens": datasets.Sequence(datasets.Value("string")),\n "ner_tags": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n "O",\n "B-PER",\n "I-PER",\n "B-ORG",\n "I-ORG",\n "B-LOC",\n "I-LOC",\n ]\n )\n ),\n "langs": datasets.Sequence(datasets.Value("string")),\n "spans": datasets.Sequence(datasets.Value("string")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, "r", encoding="utf-8") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n'


@pytest.fixture
def _SCREAMING_SNAKE_CASE ( ):
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def _SCREAMING_SNAKE_CASE ( ):
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : int , snake_case_ : int ):
    __magic_name__ = dataset_loading_script_name
    __magic_name__ = tmp_path / '''datasets''' / script_name
    script_dir.mkdir(parents=snake_case_ )
    __magic_name__ = script_dir / f'{script_name}.py'
    with open(snake_case_ , '''w''' ) as f:
        f.write(snake_case_ )
    return str(snake_case_ )
715
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): """simple docstring""" def __A ( self ) -> List[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] , ) def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]: '''simple docstring''' __magic_name__ = compute_mauve( p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , ) return out
678
0
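The MAUVE metric wrapper above documents its own usage; the following sketch is reproduced from that docstring. It assumes mauve-text, faiss, and the other listed dependencies are installed, and that the gpt2-large featurizer can be downloaded; identical prediction and reference lists should score 1.0.

import datasets

mauve = datasets.load_metric("mauve")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]
out = mauve.compute(predictions=predictions, references=references)
print(out.mauve)  # 1.0 for identical text distributions, per the docstring example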
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str: '''simple docstring''' __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope __magic_name__ = self.vocab_size - 1 def __A ( self ) -> str: '''simple docstring''' __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self , A , A , A , A , *A ) -> Tuple: '''simple docstring''' __magic_name__ = OpenAIGPTModel(config=A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , head_mask=A ) __magic_name__ = model(A , token_type_ids=A ) __magic_name__ = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , A , A , A , A , *A ) -> Dict: '''simple docstring''' __magic_name__ = OpenAIGPTLMHeadModel(A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , A , A , A , A , *A ) -> List[Any]: '''simple docstring''' __magic_name__ = OpenAIGPTDoubleHeadsModel(A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape 
, () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , A , A , A , A , *A ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.num_labels __magic_name__ = OpenAIGPTForSequenceClassification(A ) model.to(A ) model.eval() __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _a = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _a = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _a = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __A ( self , A , A , A , A , A ) -> List[str]: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def __A ( self , A , A , A=False ) -> List[str]: '''simple docstring''' __magic_name__ = super()._prepare_for_class(A , A , return_labels=A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __magic_name__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , ) __magic_name__ = inputs_dict['''labels'''] __magic_name__ = inputs_dict['''labels'''] __magic_name__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , ) __magic_name__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) return inputs_dict def __A ( self ) -> str: '''simple docstring''' __magic_name__ = OpenAIGPTModelTester(self ) __magic_name__ = ConfigTester(self , config_class=A , n_embd=37 ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*A ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A ) def __A ( self ) -> List[str]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*A ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A ) @slow def __A ( self ) -> List[str]: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = OpenAIGPTModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @slow def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(A ) __magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is __magic_name__ = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the __magic_name__ = model.generate(A , do_sample=A ) self.assertListEqual(output_ids[0].tolist() , A )
716
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between two vectors, computed with numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Distance between two vectors, computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
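A quick consistency check for the two implementations above; the tolerance is an arbitrary choice, and the snippet assumes both functions are in scope:

import math

point_a, point_b = [1.0, 2.0, 3.0], [4.0, 5.0, 6.0]
# both results should equal sqrt(27), roughly 5.196
assert math.isclose(
    float(euclidean_distance(point_a, point_b)),
    euclidean_distance_no_np(point_a, point_b),
    rel_tol=1e-9,
)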
678
0
import os import zipfile import pytest from datasets.utils.extract import ( BzipaExtractor, Extractor, GzipExtractor, LzaExtractor, SevenZipExtractor, TarExtractor, XzExtractor, ZipExtractor, ZstdExtractor, ) from .utils import require_lza, require_pyazr, require_zstandard @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : int , snake_case_ : Any , ): __magic_name__ = { '''7z''': (seven_zip_file, SevenZipExtractor), '''bz2''': (bza_file, BzipaExtractor), '''gzip''': (gz_file, GzipExtractor), '''lz4''': (lza_file, LzaExtractor), '''tar''': (tar_file, TarExtractor), '''xz''': (xz_file, XzExtractor), '''zip''': (zip_file, ZipExtractor), '''zstd''': (zstd_file, ZstdExtractor), } __magic_name__ , __magic_name__ = input_paths_and_base_extractors[compression_format] if input_path is None: __magic_name__ = f'for \'{compression_format}\' compression_format, ' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case_ ) assert base_extractor.is_extractable(snake_case_ ) __magic_name__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') base_extractor.extract(snake_case_ , snake_case_ ) if is_archive: assert output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name __magic_name__ = file_path.read_text(encoding='''utf-8''' ) else: __magic_name__ = output_path.read_text(encoding='''utf-8''' ) __magic_name__ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.mark.parametrize( '''compression_format, is_archive''' , [ ('''7z''', True), ('''bz2''', False), ('''gzip''', False), ('''lz4''', False), ('''tar''', True), ('''xz''', False), ('''zip''', True), ('''zstd''', False), ] , ) def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str , snake_case_ : str , snake_case_ : str , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , ): __magic_name__ = { '''7z''': seven_zip_file, '''bz2''': bza_file, '''gzip''': gz_file, '''lz4''': lza_file, '''tar''': tar_file, '''xz''': xz_file, '''zip''': zip_file, '''zstd''': zstd_file, } __magic_name__ = input_paths[compression_format] if input_path is None: __magic_name__ = f'for \'{compression_format}\' compression_format, ' if compression_format == "7z": reason += require_pyazr.kwargs["reason"] elif compression_format == "lz4": reason += require_lza.kwargs["reason"] elif compression_format == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case_ ) __magic_name__ = Extractor.infer_extractor_format(snake_case_ ) assert extractor_format is not None __magic_name__ = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''') Extractor.extract(snake_case_ , snake_case_ , snake_case_ ) if is_archive: assert 
output_path.is_dir() for file_path in output_path.iterdir(): assert file_path.name == text_file.name __magic_name__ = file_path.read_text(encoding='''utf-8''' ) else: __magic_name__ = output_path.read_text(encoding='''utf-8''' ) __magic_name__ = text_file.read_text(encoding='''utf-8''' ) assert extracted_file_content == expected_file_content @pytest.fixture def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] ): import tarfile __magic_name__ = tmp_path / '''data_dot_dot''' directory.mkdir() __magic_name__ = directory / '''tar_file_with_dot_dot.tar''' with tarfile.TarFile(snake_case_ , '''w''' ) as f: f.add(snake_case_ , arcname=os.path.join('''..''' , text_file.name ) ) return path @pytest.fixture def _SCREAMING_SNAKE_CASE ( snake_case_ : str ): import tarfile __magic_name__ = tmp_path / '''data_sym_link''' directory.mkdir() __magic_name__ = directory / '''tar_file_with_sym_link.tar''' os.symlink('''..''' , directory / '''subdir''' , target_is_directory=snake_case_ ) with tarfile.TarFile(snake_case_ , '''w''' ) as f: f.add(str(directory / '''subdir''' ) , arcname='''subdir''' ) # str required by os.readlink on Windows and Python < 3.8 return path @pytest.mark.parametrize( '''insecure_tar_file, error_log''' , [('''tar_file_with_dot_dot''', '''illegal path'''), ('''tar_file_with_sym_link''', '''Symlink''')] , ) def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int ): __magic_name__ = { '''tar_file_with_dot_dot''': tar_file_with_dot_dot, '''tar_file_with_sym_link''': tar_file_with_sym_link, } __magic_name__ = insecure_tar_files[insecure_tar_file] __magic_name__ = tmp_path / '''extracted''' TarExtractor.extract(snake_case_ , snake_case_ ) assert caplog.text for record in caplog.records: assert record.levelname == "ERROR" assert error_log in record.msg def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ): # We should have less false positives than zipfile.is_zipfile # We do that by checking only the magic number __magic_name__ = tmpdir / '''not_a_zip_file''' # From: https://github.com/python/cpython/pull/5053 __magic_name__ = ( B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00''' B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I''' B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07''' B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82''' ) with not_a_zip_file.open('''wb''' ) as f: f.write(snake_case_ ) assert zipfile.is_zipfile(str(snake_case_ ) ) # is a false positive for `zipfile` assert not ZipExtractor.is_extractable(snake_case_ ) # but we're right
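The false-positive test above relies on checking only a leading magic number rather than scanning the whole file the way zipfile.is_zipfile does; a minimal sketch of that idea (not the datasets implementation, and it deliberately ignores empty and spanned archives):

def looks_like_zip(path: str) -> bool:
    # A regular zip starts with the local-file-header signature PK\x03\x04;
    # zipfile.is_zipfile instead searches for an end-of-central-directory
    # record, which the crafted PNG above happens to contain.
    with open(path, "rb") as f:
        return f.read(4) == b"PK\x03\x04"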
717
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : str = logging.get_logger(__name__) a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/' a_ : List[Any] = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ): if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: __magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: __magic_name__ = key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: __magic_name__ = key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: __magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ): __magic_name__ = {} import re __magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) __magic_name__ = re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) __magic_name__ = re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) __magic_name__ = re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(snake_case_ ): __magic_name__ = re_encoder_block_conv_in.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) __magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' __magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ ) elif re_encoder_block_resnet.fullmatch(snake_case_ ): __magic_name__ = re_encoder_block_resnet.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) __magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]] __magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' 
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' __magic_name__ = prefix + resnet_block __magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ ) elif re_encoder_block_proj_out.fullmatch(snake_case_ ): __magic_name__ = re_encoder_block_proj_out.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' __magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(snake_case_ ): __magic_name__ = re_decoder_block_conv_out.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 __magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' __magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ ) elif re_decoder_block_resnet.fullmatch(snake_case_ ): __magic_name__ = re_decoder_block_resnet.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 __magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]] __magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' __magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' __magic_name__ = prefix + resnet_block __magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ ) elif re_decoder_block_proj_in.fullmatch(snake_case_ ): __magic_name__ = re_decoder_block_proj_in.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' __magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(snake_case_ ): __magic_name__ = re_prior_cond_conv_out.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 __magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' __magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ ) elif re_prior_cond_resnet.fullmatch(snake_case_ ): __magic_name__ = re_prior_cond_resnet.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 __magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]] __magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.' 
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' __magic_name__ = prefix + resnet_block __magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ ) elif re_prior_cond_proj_in.fullmatch(snake_case_ ): __magic_name__ = re_prior_cond_proj_in.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}' __magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ ) # keep original key else: __magic_name__ = original_key __magic_name__ = replace_key(snake_case_ ) if f'{key_prefix}.{key}' not in model_state_dict or key is None: print(f'failed converting {original_key} to {key}, does not match' ) # handle missmatched shape elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape: __magic_name__ = model_state_dict[f'{key_prefix}.{key}'] print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' ) __magic_name__ = original_key __magic_name__ = original_key __magic_name__ = value return new_dict @torch.no_grad() def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ): __magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ ) os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ ) open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content ) __magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]] __magic_name__ = JukeboxConfig.from_pretrained(snake_case_ ) __magic_name__ = JukeboxModel(snake_case_ ) __magic_name__ = [] __magic_name__ = {} for i, dict_name in enumerate(snake_case_ ): __magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model'''] __magic_name__ = {} for k in old_dic.keys(): if k.endswith('''.b''' ): __magic_name__ = old_dic[k] elif k.endswith('''.w''' ): __magic_name__ = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: __magic_name__ = old_dic[k] else: __magic_name__ = old_dic[k] __magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}' __magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ ) weight_dict.append(snake_case_ ) __magic_name__ = weight_dict.pop(0 ) model.vqvae.load_state_dict(snake_case_ ) for i in range(len(snake_case_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile: json.dump(snake_case_ , snake_case_ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case_ ) return weight_dict if __name__ == "__main__": a_ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) a_ : int = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
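To see the key rewriting in isolation, here is the first encoder pattern above applied to a made-up checkpoint key:

import re

re_encoder_block_conv_in = re.compile(
    r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)"
)

old_key = "encoders.0.level_blocks.1.model.2.3.weight"
groups = re_encoder_block_conv_in.fullmatch(old_key).groups()
block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 3 = 7
new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
print(new_key)  # encoders.0.level_blocks.1.downsample_block.7.weight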
678
0
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """
    Recursive backtracking that counts the simple paths from the top-left to
    the bottom-right corner of a grid, moving in four directions and
    avoiding cells marked 1.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
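A small smoke test for the path counter above; on a ring around a single blocked cell there are exactly two simple paths between opposite corners:

blocked_centre_grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
assert depth_first_search(blocked_centre_grid, 0, 0, set()) == 2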
718
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ : int = logging.get_logger(__name__) a_ : Optional[int] = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = """table-transformer""" _a = ["""past_key_values"""] _a = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(A , A ): __magic_name__ = backbone_config.get('''model_type''' ) __magic_name__ = CONFIG_MAPPING[backbone_model_type] __magic_name__ = config_class.from_dict(A ) # set timm attributes to None __magic_name__ , __magic_name__ , __magic_name__ = None, None, None __magic_name__ = use_timm_backbone __magic_name__ = backbone_config __magic_name__ = num_channels __magic_name__ = num_queries __magic_name__ = d_model __magic_name__ = encoder_ffn_dim __magic_name__ = encoder_layers __magic_name__ = encoder_attention_heads __magic_name__ = decoder_ffn_dim __magic_name__ = decoder_layers __magic_name__ = decoder_attention_heads __magic_name__ = dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = activation_function __magic_name__ = init_std __magic_name__ = init_xavier_std __magic_name__ = encoder_layerdrop __magic_name__ = decoder_layerdrop __magic_name__ = encoder_layers __magic_name__ = auxiliary_loss __magic_name__ = position_embedding_type __magic_name__ = backbone __magic_name__ = use_pretrained_backbone __magic_name__ = dilation # Hungarian matcher __magic_name__ = class_cost __magic_name__ = bbox_cost __magic_name__ = giou_cost # Loss coefficients __magic_name__ = mask_loss_coefficient __magic_name__ = dice_loss_coefficient __magic_name__ = bbox_loss_coefficient __magic_name__ = giou_loss_coefficient __magic_name__ = eos_coefficient super().__init__(is_encoder_decoder=A , **A ) @property def __A ( self ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def __A ( self ) -> int: '''simple docstring''' return self.d_model class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = version.parse("""1.11""" ) @property def __A ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __A ( self ) -> float: '''simple docstring''' return 1E-5 @property def __A ( self ) -> int: '''simple docstring''' return 12
678
0
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check whether a queen placed at board[row][column] is unattacked."""
    # same row and same column
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left and upper-right diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
719
import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ): # Initialise PyTorch model __magic_name__ = LxmertConfig.from_json_file(snake_case_ ) print(f'Building PyTorch model from configuration: {config}' ) __magic_name__ = LxmertForPreTraining(snake_case_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case_ ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) a_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
678
0
'''simple docstring''' import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class SCREAMING_SNAKE_CASE_ ( nn.Module ): """simple docstring""" def __init__( self ) -> Tuple: '''simple docstring''' super().__init__() __magic_name__ = nn.Linear(3 , 4 ) __magic_name__ = nn.BatchNormad(4 ) __magic_name__ = nn.Linear(4 , 5 ) def __A ( self , A ) -> Tuple: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(A ) ) ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(A , model.state_dict() ) __magic_name__ = os.path.join(A , '''index.json''' ) self.assertTrue(os.path.isfile(A ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __magic_name__ = os.path.join(A , F'{key}.dat' ) self.assertTrue(os.path.isfile(A ) ) # TODO: add tests on the fact weights are properly loaded def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __magic_name__ = torch.randn(2 , 3 , dtype=A ) with TemporaryDirectory() as tmp_dir: __magic_name__ = offload_weight(A , '''weight''' , A , {} ) __magic_name__ = os.path.join(A , '''weight.dat''' ) self.assertTrue(os.path.isfile(A ) ) self.assertDictEqual(A , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(A ).split('''.''' )[1]}} ) __magic_name__ = load_offloaded_weight(A , index['''weight'''] ) self.assertTrue(torch.equal(A , A ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = ModelForTest() __magic_name__ = model.state_dict() __magic_name__ = {k: v for k, v in state_dict.items() if '''linear2''' not in k} __magic_name__ = {k: v for k, v in state_dict.items() if '''linear2''' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(A , A ) __magic_name__ = OffloadedWeightsLoader(state_dict=A , save_folder=A ) # Every key is there with the right value self.assertEqual(sorted(A ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(A , weight_map[key] ) ) __magic_name__ = {k: v for k, v in state_dict.items() if '''weight''' in k} __magic_name__ = {k: v for k, v in state_dict.items() if '''weight''' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(A , A ) __magic_name__ = OffloadedWeightsLoader(state_dict=A , save_folder=A ) # Every key is there with the right value self.assertEqual(sorted(A ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(A , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(A , A ) # Duplicates are removed __magic_name__ = OffloadedWeightsLoader(state_dict=A , save_folder=A ) # Every key is there with the right value self.assertEqual(sorted(A ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(A , weight_map[key] ) ) def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2} __magic_name__ = extract_submodules_state_dict(A , ['''a.1''', '''a.2'''] ) self.assertDictEqual(A , {'''a.1''': 0, '''a.2''': 2} ) __magic_name__ = {'''a.1.a''': 0, '''a.10.a''': 
1, '''a.2.a''': 2} __magic_name__ = extract_submodules_state_dict(A , ['''a.1''', '''a.2'''] ) self.assertDictEqual(A , {'''a.1.a''': 0, '''a.2.a''': 2} )
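Outside the test harness, the round trip exercised above looks roughly like this (a sketch assuming the accelerate API shown in the imports):

import torch
from tempfile import TemporaryDirectory

from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"linear.weight": torch.randn(4, 3), "linear.bias": torch.randn(4)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)  # writes one .dat per tensor plus index.json
    weight_map = OffloadedWeightsLoader(save_folder=tmp_dir)
    assert sorted(weight_map) == sorted(state_dict)
    assert all(torch.allclose(state_dict[k], weight_map[k]) for k in state_dict)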
720
# Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ): __magic_name__ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] __magic_name__ = { '''wmt16-en-de-dist-12-1''': [28.3, 27.52], '''wmt16-en-de-dist-6-1''': [27.4, 27.11], '''wmt16-en-de-12-1''': [26.9, 25.75], } __magic_name__ = f'{src_lang}-{tgt_lang}' __magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. 
`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n' model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ ) __magic_name__ = os.path.join(snake_case_ , '''README.md''' ) print(f'Generating {path}' ) with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f: f.write(snake_case_ ) # make sure we are under the root of the project a_ : Tuple = Path(__file__).resolve().parent.parent.parent a_ : Dict = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: a_ : List[str] = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
678
0
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: count ordered combinations of items (with repetition) summing to target."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoisation over the same recursion."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up dynamic programming over the same recurrence."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
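A consistency check across the three implementations above; the recurrence dp[t] = sum(dp[t - item] for item in array) with dp[0] = 1 gives 9 ordered ways to reach 5 from {1, 2, 5}:

array, target = [1, 2, 5], 5
n = len(array)
assert (
    combination_sum_iv(n, array, target)
    == combination_sum_iv_dp_array(n, array, target)
    == combination_sum_iv_bottom_up(n, array, target)
    == 9
)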
721
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: assuming the activities are sorted by finish
    time, repeatedly pick the next activity whose start time is at least the
    finish time of the previously selected one.
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
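The same greedy scan as a hypothetical helper that returns the selected indices instead of printing them (not part of the original file):

def max_activities(start: list[int], finish: list[int]) -> list[int]:
    # assumes the activities are already sorted by finish time
    selected = [0]
    for j in range(1, len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected


assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]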
678
0
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType a_ : Any = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = """vision-encoder-decoder""" _a = True def __init__( self , **A ) -> Optional[Any]: '''simple docstring''' super().__init__(**A ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'A configuraton of type {self.model_type} cannot be instantiated because ' F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' ) __magic_name__ = kwargs.pop('''encoder''' ) __magic_name__ = encoder_config.pop('''model_type''' ) __magic_name__ = kwargs.pop('''decoder''' ) __magic_name__ = decoder_config.pop('''model_type''' ) __magic_name__ = AutoConfig.for_model(A , **A ) __magic_name__ = AutoConfig.for_model(A , **A ) __magic_name__ = True @classmethod def __A ( cls , A , A , **A ) -> PretrainedConfig: '''simple docstring''' logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) __magic_name__ = True __magic_name__ = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **A ) def __A ( self ) -> str: '''simple docstring''' __magic_name__ = copy.deepcopy(self.__dict__ ) __magic_name__ = self.encoder.to_dict() __magic_name__ = self.decoder.to_dict() __magic_name__ = self.__class__.model_type return output class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = version.parse("""1.11""" ) @property def __A ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __A ( self ) -> float: '''simple docstring''' return 1E-4 @property def __A ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def __A ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __magic_name__ = OrderedDict() __magic_name__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} __magic_name__ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} __magic_name__ = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def __A ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: '''simple docstring''' import torch __magic_name__ = OrderedDict() __magic_name__ = super().generate_dummy_inputs( A , batch_size=A , seq_length=A , is_pair=A , framework=A ) __magic_name__ , __magic_name__ = dummy_input['''input_ids'''].shape __magic_name__ = (batch, encoder_sequence, self._config.encoder_hidden_size) __magic_name__ = dummy_input.pop('''input_ids''' ) __magic_name__ = dummy_input.pop('''attention_mask''' ) __magic_name__ = torch.zeros(A ) return common_inputs class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @property def __A ( self ) -> None: '''simple docstring''' pass def __A ( self , A ) -> OnnxConfig: '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(A ) def __A ( self , A , A , 
A = "default" ) -> OnnxConfig: '''simple docstring''' __magic_name__ = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(A , A )
700
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder a_ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name a_ : List[str] = 256 class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = ["""melgan"""] def __init__( self , A , A , A , A , A , ) -> None: '''simple docstring''' super().__init__() # From MELGAN __magic_name__ = math.log(1E-5 ) # Matches MelGAN training. __magic_name__ = 4.0 # Largest value for most examples __magic_name__ = 1_28 self.register_modules( notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , ) def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> List[Any]: '''simple docstring''' __magic_name__ , __magic_name__ = output_range if clip: __magic_name__ = torch.clip(A , self.min_value , self.max_value ) # Scale to [0, 1]. __magic_name__ = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __A ( self , A , A=(-1.0, 1.0) , A=False ) -> Optional[int]: '''simple docstring''' __magic_name__ , __magic_name__ = input_range __magic_name__ = torch.clip(A , A , A ) if clip else outputs # Scale to [0, 1]. __magic_name__ = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __A ( self , A , A , A ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = input_tokens > 0 __magic_name__ , __magic_name__ = self.notes_encoder( encoder_input_tokens=A , encoder_inputs_mask=A ) __magic_name__ , __magic_name__ = self.continuous_encoder( encoder_inputs=A , encoder_inputs_mask=A ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __A ( self , A , A , A ) -> Optional[int]: '''simple docstring''' __magic_name__ = noise_time if not torch.is_tensor(A ): __magic_name__ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(A ) and len(timesteps.shape ) == 0: __magic_name__ = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __magic_name__ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) __magic_name__ = self.decoder( encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A ) return logits @torch.no_grad() def __call__( self , A , A = None , A = 1_00 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]: '''simple docstring''' if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( F'`callback_steps` has to be a positive integer but is {callback_steps} of type' F' {type(A )}.' 
) __magic_name__ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) __magic_name__ = np.zeros([1, 0, self.n_dims] , np.floataa ) __magic_name__ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device ) for i, encoder_input_tokens in enumerate(A ): if i == 0: __magic_name__ = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. __magic_name__ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. __magic_name__ = ones __magic_name__ = self.scale_features( A , output_range=[-1.0, 1.0] , clip=A ) __magic_name__ = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop __magic_name__ = randn_tensor( shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(A ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): __magic_name__ = self.decode( encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 __magic_name__ = self.scheduler.step(A , A , A , generator=A ).prev_sample __magic_name__ = self.scale_to_features(A , input_range=[-1.0, 1.0] ) __magic_name__ = mel[:1] __magic_name__ = mel.cpu().float().numpy() __magic_name__ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A ) logger.info('''Generated segment''' , A ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": __magic_name__ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: __magic_name__ = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=A )
678
0
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Iterate z -> z**2 + c with c = x + y*i and return the relative step
    (step / (max_step - 1)) at which the orbit diverges; 1.0 means it never
    diverged within max_step iterations.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
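Two cheap sanity checks on the escape-time function above: the origin never diverges, while a point far outside the set escapes on the first step:

assert get_distance(0, 0, 50) == 1.0
assert get_distance(5, 0, 50) == 0.0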
701
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
678
0
import logging import os from typing import List, Tuple import numpy as np import psutil import torch import torch.distributed as dist from transformers import RagRetriever a_ : Optional[Any] = logging.getLogger(__name__) class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , A , A , A , A=None ) -> List[Any]: '''simple docstring''' super().__init__( A , question_encoder_tokenizer=A , generator_tokenizer=A , index=A , init_retrieval=A , ) __magic_name__ = None def __A ( self , A ) -> str: '''simple docstring''' logger.info('''initializing retrieval''' ) # initializing a separate process group for retrieval as the default # nccl backend doesn't support gather/scatter operations while gloo # is too slow to replace nccl for the core gpu communication if dist.is_initialized(): logger.info('''dist initialized''' ) # needs to be set manually __magic_name__ = self._infer_socket_ifname() # avoid clash with the NCCL port __magic_name__ = str(distributed_port + 1 ) __magic_name__ = dist.new_group(ranks=A , backend='''gloo''' ) # initialize retriever only on the main worker if not dist.is_initialized() or self._is_main(): logger.info('''dist not initialized / main''' ) self.index.init_index() # all processes wait untill the retriever is initialized by the main process if dist.is_initialized(): torch.distributed.barrier(group=self.process_group ) def __A ( self ) -> str: '''simple docstring''' return dist.get_rank(group=self.process_group ) == 0 def __A ( self , A , A , A=torch.floataa ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = torch.empty(A , dtype=A ) dist.scatter(A , src=0 , scatter_list=A , group=self.process_group ) return target_tensor def __A ( self ) -> str: '''simple docstring''' __magic_name__ = psutil.net_if_addrs() # a hacky way to deal with varying network interface names __magic_name__ = next((addr for addr in addrs if addr.startswith('''e''' )) , A ) return ifname def __A ( self , A , A ) -> Tuple[np.ndarray, List[dict]]: '''simple docstring''' if not dist.is_initialized(): __magic_name__ , __magic_name__ = self._main_retrieve(A , A ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A ) # distributed training __magic_name__ = dist.get_world_size(group=self.process_group ) # gather logic __magic_name__ = None if self._is_main(): __magic_name__ = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(A )] dist.gather(torch.tensor(A ) , dst=0 , gather_list=A , group=self.process_group ) # scatter logic __magic_name__ = question_hidden_states.shape[0] __magic_name__ = [] __magic_name__ = [] if self._is_main(): assert len(A ) == world_size __magic_name__ , __magic_name__ = self._main_retrieve(torch.cat(A ).numpy() , A ) __magic_name__ , __magic_name__ = torch.tensor(A ), torch.tensor(A ) __magic_name__ = self._chunk_tensor(A , A ) __magic_name__ = self._chunk_tensor(A , A ) __magic_name__ = self._scattered(A , [n_queries, n_docs] , target_type=torch.intaa ) __magic_name__ = self._scattered(A , [n_queries, n_docs, question_hidden_states.shape[1]] ) return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(A )
702
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ): __magic_name__ = SwinConfig(image_size=192 ) if "base" in model_name: __magic_name__ = 6 __magic_name__ = 128 __magic_name__ = (2, 2, 18, 2) __magic_name__ = (4, 8, 16, 32) elif "large" in model_name: __magic_name__ = 12 __magic_name__ = 192 __magic_name__ = (2, 2, 18, 2) __magic_name__ = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) __magic_name__ = window_size __magic_name__ = embed_dim __magic_name__ = depths __magic_name__ = num_heads return config def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ): if "encoder.mask_token" in name: __magic_name__ = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: __magic_name__ = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: __magic_name__ = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: __magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __magic_name__ = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __magic_name__ = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __magic_name__ = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": __magic_name__ = '''layernorm.weight''' if name == "encoder.norm.bias": __magic_name__ = '''layernorm.bias''' if "decoder" in name: pass else: __magic_name__ = '''swin.''' + name return name def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Any ): for key in orig_state_dict.copy().keys(): __magic_name__ = orig_state_dict.pop(snake_case_ ) if "attn_mask" in key: pass elif "qkv" in key: __magic_name__ = key.split('''.''' ) __magic_name__ = int(key_split[2] ) __magic_name__ = int(key_split[4] ) __magic_name__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __magic_name__ = val[:dim, :] __magic_name__ = val[ dim : dim * 2, : ] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[ :dim ] __magic_name__ = val[ dim : dim * 2 ] __magic_name__ = val[ -dim: ] else: __magic_name__ = val return orig_state_dict def _SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Any , snake_case_ : str ): __magic_name__ = torch.load(snake_case_ , map_location='''cpu''' )['''model'''] __magic_name__ = get_swin_config(snake_case_ ) __magic_name__ = SwinForMaskedImageModeling(snake_case_ ) model.eval() __magic_name__ = convert_state_dict(snake_case_ , snake_case_ ) model.load_state_dict(snake_case_ ) __magic_name__ = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __magic_name__ = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) __magic_name__ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) __magic_name__ = image_processor(images=snake_case_ , return_tensors='''pt''' ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: 
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case_ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case_ ) if push_to_hub: print(f'Pushing model and image processor for {model_name} to hub' ) model.push_to_hub(f'microsoft/{model_name}' ) image_processor.push_to_hub(f'microsoft/{model_name}' ) if __name__ == "__main__": a_ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='swin-base-simmim-window6-192', type=str, choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'], help='Name of the Swin SimMIM model you\'d like to convert.', ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth', type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a_ : Optional[Any] = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
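A self-contained illustration of the qkv split performed in convert_state_dict above, with a toy head size:

import torch

dim = 4  # toy all_head_size
qkv_weight = torch.randn(3 * dim, dim)  # fused query/key/value projection
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert torch.equal(torch.cat([query, key, value]), qkv_weight)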
678
0
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes the wrapped function return its own runtime in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples "
            f"but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
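A minimal invocation sketch for the helpers above; the feature spec and output path are illustrative, and a writable path plus an installed `datasets` are assumed:

import datasets

features = datasets.Features(
    {"text": datasets.Value("string"), "label": datasets.Value("int32")}
)
dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)
print(dataset.num_rows)  # 10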
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
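# Added sketch (not part of the original file): the signature trick on a tiny,
# made-up word list, independent of words.txt. Words that sort to the same
# letter sequence are anagrams of one another.
sample_words = ["listen", "silent", "enlist", "google"]
sample_groups = collections.defaultdict(list)
for sample_word in sample_words:
    sample_groups[signature(sample_word)].append(sample_word)
assert sample_groups[signature("listen")] == ["listen", "silent", "enlist"]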
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
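# Added usage note (not part of the original script; the script file name and
# paths below are hypothetical, the flags are the ones defined above):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model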
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
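# Added numeric check (not part of the original file): for a degree-2 curve,
# B(t) = (1-t)^2 * P0 + 2t(1-t) * P1 + t^2 * P2, so at t = 0.5 the basis
# weights are (0.25, 0.5, 0.25). Requires scipy, like the class above.
demo_curve = BezierCurve([(0, 0), (2, 4), (4, 0)])
assert demo_curve.bezier_curve_function(0.5) == (2.0, 2.0)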
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]


if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )

    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
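# Added examples (not part of the original file), derived from the pattern
# above: the prefix must be 0, 94, +94 or 0094, and the subscriber part must
# start with 7 followed by a valid second digit and seven more digits.
assert is_sri_lankan_phone_number("0770000000")
assert is_sri_lankan_phone_number("+94712345678")
assert not is_sri_lankan_phone_number("1234567890")  # invalid prefix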
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_albert"] = [
        "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AlbertForMaskedLM",
        "AlbertForMultipleChoice",
        "AlbertForPreTraining",
        "AlbertForQuestionAnswering",
        "AlbertForSequenceClassification",
        "AlbertForTokenClassification",
        "AlbertModel",
        "AlbertPreTrainedModel",
        "load_tf_weights_in_albert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_albert"] = [
        "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAlbertForMaskedLM",
        "TFAlbertForMultipleChoice",
        "TFAlbertForPreTraining",
        "TFAlbertForQuestionAnswering",
        "TFAlbertForSequenceClassification",
        "TFAlbertForTokenClassification",
        "TFAlbertMainLayer",
        "TFAlbertModel",
        "TFAlbertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_albert"] = [
        "FlaxAlbertForMaskedLM",
        "FlaxAlbertForMultipleChoice",
        "FlaxAlbertForPreTraining",
        "FlaxAlbertForQuestionAnswering",
        "FlaxAlbertForSequenceClassification",
        "FlaxAlbertForTokenClassification",
        "FlaxAlbertModel",
        "FlaxAlbertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert import AlbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_albert_fast import AlbertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_albert import (
            ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForPreTraining,
            AlbertForQuestionAnswering,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertModel,
            AlbertPreTrainedModel,
            load_tf_weights_in_albert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_albert import (
            TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAlbertForMaskedLM,
            TFAlbertForMultipleChoice,
            TFAlbertForPreTraining,
            TFAlbertForQuestionAnswering,
            TFAlbertForSequenceClassification,
            TFAlbertForTokenClassification,
            TFAlbertMainLayer,
            TFAlbertModel,
            TFAlbertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_albert import (
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForPreTraining,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertModel,
            FlaxAlbertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import os import sys import unittest a_ : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path a_ : Optional[Any] = os.path.join(git_repo_path, 'src', 'diffusers') class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = find_backend(''' if not is_torch_available():''' ) self.assertEqual(A , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") __magic_name__ = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(A , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") __magic_name__ = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(A , '''torch_and_transformers_and_onnx''' ) def __A ( self ) -> str: '''simple docstring''' __magic_name__ = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , A ) self.assertIn('''torch_and_transformers''' , A ) self.assertIn('''flax_and_transformers''' , A ) self.assertIn('''torch_and_transformers_and_onnx''' , A ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(A , '''\nCONSTANT = None\n''' ) __magic_name__ = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( A , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __magic_name__ = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' __magic_name__ = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(A , A ) def __A ( self ) -> int: '''simple docstring''' __magic_name__ = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' __magic_name__ = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , A )
import datasets


_CITATION = '\\n@InProceedings{conneau2018xnli,\n  author = "Conneau, Alexis\n        and Rinott, Ruty\n        and Lample, Guillaume\n        and Williams, Adina\n        and Bowman, Samuel R.\n        and Schwenk, Holger\n        and Stoyanov, Veselin",\n  title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n  booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n               in Natural Language Processing",\n  year = "2018",\n  publisher = "Association for Computational Linguistics",\n  location = "Brussels, Belgium",\n}\n'

_DESCRIPTION = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'

_KWARGS_DESCRIPTION = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n    predictions: Predicted labels.\n    references: Ground truth labels.\nReturns:\n    \'accuracy\': accuracy\nExamples:\n\n    >>> predictions = [0, 1]\n    >>> references = [0, 1]\n    >>> xnli_metric = datasets.load_metric("xnli")\n    >>> results = xnli_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


def simple_accuracy(preds, labels):
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
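# Added sketch (not part of the original metric file): simple_accuracy is just
# element-wise equality averaged over the two arrays.
import numpy as np

assert simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0])) == 2 / 3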
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
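# Added examples (not part of the original file): on an open 2x2 grid there
# are exactly two simple paths from the top-left to the bottom-right cell
# (right-then-down and down-then-right); blocking one cell leaves a single path.
assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2
assert depth_first_search([[0, 1], [0, 0]], 0, 0, set()) == 1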
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result


if __name__ == "__main__":
    print(f"{solution() = }")
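# Added checks (not part of the original file): _modexpt agrees with Python's
# built-in three-argument pow when the base is smaller than the modulus, and a
# two-level tower solution(3, 2) computes 3**3 = 27.
assert _modexpt(3, 20, 10**8) == pow(3, 20, 10**8)
assert solution(base=3, height=2, digits=8) == 27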
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor(meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
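# Added examples (not part of the original file): conversions are pure powers
# of ten between the two unit exponents, and a trailing "s" is stripped from
# the unit names.
assert length_conversion(1, "kilometer", "meter") == 1000
assert length_conversion(4, "meters", "kilometers") == 0.004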
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
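# Added illustration (not part of the original file): the conv branch above
# maps a PyTorch (out, in, kh, kw) weight onto a Flax (kh, kw, in, out) kernel
# via transpose(2, 3, 1, 0). A quick shape check with NumPy:
import numpy as np

demo_pt_conv = np.zeros((16, 3, 3, 3))  # PyTorch layout: (out, in, kh, kw)
assert demo_pt_conv.transpose(2, 3, 1, 0).shape == (3, 3, 3, 16)  # Flax layout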
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() __magic_name__ = dict(zip(A , range(len(A ) ) ) ) __magic_name__ = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } __magic_name__ = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_60_00, '''return_attention_mask''': False, '''do_normalize''': True, } __magic_name__ = tempfile.mkdtemp() __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __magic_name__ = os.path.join(self.tmpdirname , A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A ) + '''\n''' ) # load decoder from hub __magic_name__ = '''hf-internal-testing/ngram-beam-search-decoder''' def __A ( self , **A ) -> str: '''simple docstring''' __magic_name__ = self.add_kwargs_tokens_map.copy() kwargs.update(A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A ) def __A ( self , **A ) -> Tuple: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A ) def __A ( self , **A ) -> int: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A ) def __A ( self ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __A ( self ) -> Optional[Any]: '''simple docstring''' __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) processor.save_pretrained(self.tmpdirname ) __magic_name__ = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , 
decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , A ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __magic_name__ = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def __A ( self ) -> List[Any]: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) __magic_name__ = floats_list((3, 10_00) ) __magic_name__ = feature_extractor(A , return_tensors='''np''' ) __magic_name__ = processor(A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) __magic_name__ = '''This is a test string''' __magic_name__ = processor(text=A ) __magic_name__ = tokenizer(A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self , A=(2, 10, 16) , A=77 ) -> int: '''simple docstring''' np.random.seed(A ) return np.random.rand(*A ) def __A ( self ) -> str: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) __magic_name__ = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __magic_name__ = processor.decode(A ) __magic_name__ = decoder.decode_beams(A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def __A ( self , A ) -> List[str]: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) __magic_name__ = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. 
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __magic_name__ = processor.batch_decode(A ) else: with get_context(A ).Pool() as pool: __magic_name__ = processor.batch_decode(A , A ) __magic_name__ = list(A ) with get_context('''fork''' ).Pool() as p: __magic_name__ = decoder.decode_beams_batch(A , A ) __magic_name__ , __magic_name__ , __magic_name__ = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(A , decoded_processor.logit_score ) self.assertListEqual(A , decoded_processor.lm_score ) def __A ( self ) -> List[str]: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) __magic_name__ = self._get_dummy_logits() __magic_name__ = 15 __magic_name__ = -20.0 __magic_name__ = -4.0 __magic_name__ = processor.batch_decode( A , beam_width=A , beam_prune_logp=A , token_min_logp=A , ) __magic_name__ = decoded_processor_out.text __magic_name__ = list(A ) with get_context('''fork''' ).Pool() as pool: __magic_name__ = decoder.decode_beams_batch( A , A , beam_width=A , beam_prune_logp=A , token_min_logp=A , ) __magic_name__ = [d[0][0] for d in decoded_decoder_out] __magic_name__ = [d[0][2] for d in decoded_decoder_out] __magic_name__ = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(A , A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , A ) self.assertTrue(np.array_equal(A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.0_54, -18.4_47] , A , atol=1E-3 ) ) self.assertTrue(np.array_equal(A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.5_54, -13.94_74] , A , atol=1E-3 ) ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) __magic_name__ = self._get_dummy_logits() __magic_name__ = 2.0 __magic_name__ = 5.0 __magic_name__ = -20.0 __magic_name__ = True __magic_name__ = processor.batch_decode( A , alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , ) __magic_name__ = decoded_processor_out.text __magic_name__ = list(A ) decoder.reset_params( alpha=A , beta=A , unk_score_offset=A , lm_score_boundary=A , ) with get_context('''fork''' ).Pool() as pool: __magic_name__ = decoder.decode_beams_batch( A , A , ) __magic_name__ = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(A , A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , A ) __magic_name__ = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , A ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __magic_name__ = processor.decoder.model_container[processor.decoder._model_key] __magic_name__ = 
Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __magic_name__ = os.listdir(A ) __magic_name__ = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(A , A ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = snapshot_download('''hf-internal-testing/processor_with_lm''' ) __magic_name__ = WavaVecaProcessorWithLM.from_pretrained(A ) __magic_name__ = processor.decoder.model_container[processor.decoder._model_key] __magic_name__ = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __magic_name__ = os.listdir(A ) __magic_name__ = os.listdir(A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(A , A ) def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __magic_name__ = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __magic_name__ = floats_list((3, 10_00) ) __magic_name__ = processor_wavaveca(A , return_tensors='''np''' ) __magic_name__ = processor_auto(A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) __magic_name__ = self._get_dummy_logits() __magic_name__ = processor_wavaveca.batch_decode(A ) __magic_name__ = processor_auto.batch_decode(A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def __A ( self ) -> List[Any]: '''simple docstring''' __magic_name__ = self.get_feature_extractor() __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_decoder() __magic_name__ = WavaVecaProcessorWithLM(tokenizer=A , feature_extractor=A , decoder=A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def __A ( A , A ) -> List[str]: '''simple docstring''' __magic_name__ = [d[key] for d in offsets] return retrieved_list def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __magic_name__ = self._get_dummy_logits()[0] __magic_name__ = processor.decode(A , output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(A , A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __magic_name__ = self._get_dummy_logits() __magic_name__ = processor.batch_decode(A , 
output_word_offsets=A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(A , A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def __A ( self ) -> Union[str, Any]: '''simple docstring''' import torch __magic_name__ = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=A ) __magic_name__ = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) ) __magic_name__ = iter(A ) __magic_name__ = next(A ) __magic_name__ = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __magic_name__ = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __magic_name__ = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __magic_name__ = model(A ).logits.cpu().numpy() __magic_name__ = processor.decode(logits[0] , output_word_offsets=A ) __magic_name__ = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __magic_name__ = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] __magic_name__ = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(A , '''word''' ) ) , A ) self.assertEqual(''' '''.join(self.get_from_offsets(A , '''word''' ) ) , output.text ) # output times __magic_name__ = torch.tensor(self.get_from_offsets(A , '''start_time''' ) ) __magic_name__ = torch.tensor(self.get_from_offsets(A , '''end_time''' ) ) # fmt: off __magic_name__ = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) __magic_name__ = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(A , A , atol=0.01 ) ) self.assertTrue(torch.allclose(A , A , atol=0.01 ) )
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class SCREAMING_SNAKE_CASE_ : """simple docstring""" def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=5_12 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> str: '''simple docstring''' __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope __magic_name__ = self.vocab_size - 1 def __A ( self ) -> str: '''simple docstring''' __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __magic_name__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __A ( self , A , A , A , A , *A ) -> Tuple: '''simple docstring''' __magic_name__ = OpenAIGPTModel(config=A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , head_mask=A ) __magic_name__ = model(A , token_type_ids=A ) __magic_name__ = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , A , A , A , A , *A ) -> Dict: '''simple docstring''' __magic_name__ = OpenAIGPTLMHeadModel(A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , A , A , A , A , *A ) -> List[Any]: '''simple docstring''' __magic_name__ = OpenAIGPTDoubleHeadsModel(A ) model.to(A ) model.eval() __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.loss.shape 
, () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , A , A , A , A , *A ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.num_labels __magic_name__ = OpenAIGPTForSequenceClassification(A ) model.to(A ) model.eval() __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = model(A , token_type_ids=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _a = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) _a = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly _a = ( { """feature-extraction""": OpenAIGPTModel, """text-classification""": OpenAIGPTForSequenceClassification, """text-generation""": OpenAIGPTLMHeadModel, """zero-shot""": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __A ( self , A , A , A , A , A ) -> List[str]: '''simple docstring''' if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def __A ( self , A , A , A=False ) -> List[str]: '''simple docstring''' __magic_name__ = super()._prepare_for_class(A , A , return_labels=A ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __magic_name__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=A , ) __magic_name__ = inputs_dict['''labels'''] __magic_name__ = inputs_dict['''labels'''] __magic_name__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=A , ) __magic_name__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=A ) return inputs_dict def __A ( self ) -> str: '''simple docstring''' __magic_name__ = OpenAIGPTModelTester(self ) __magic_name__ = ConfigTester(self , config_class=A , n_embd=37 ) def __A ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*A ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*A ) def __A ( self ) -> List[str]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*A ) def __A ( self ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*A ) @slow def __A ( self ) -> List[str]: '''simple docstring''' for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = OpenAIGPTModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_torch class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" @slow def __A ( self ) -> Tuple: '''simple docstring''' __magic_name__ = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(A ) __magic_name__ = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=A ) # the president is __magic_name__ = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the __magic_name__ = model.generate(A , do_sample=A ) self.assertListEqual(output_ids[0].tolist() , A )
from functools import reduce


N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
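# Added illustration (not part of the original file): the reduce call turns a
# digit substring into the product of its digits by threading a string
# accumulator, e.g. "9989" -> 9 * 9 * 8 * 9 = 5832.
assert int(reduce(lambda x, y: str(int(x) * int(y)), "9989")) == 5832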
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99_999])
        * int(constant[999_999])
    )


if __name__ == "__main__":
    print(solution())
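# Added illustration (not part of the original file): the constant is built by
# concatenating 1, 2, 3, ..., so the 10th digit (index 9) is the leading '1'
# of the number 10.
demo_digits = "".join(str(i) for i in range(1, 16))  # "123456789101112131415"
assert demo_digits[9] == "1"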
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
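# Added check (not part of the original file): get_mid returns the midpoint of
# two vertices, which is what drives the triangle subdivision above.
assert get_mid((0, 0), (4, 2)) == (2.0, 1.0)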
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch a_ : str = True except ImportError: a_ : Optional[int] = False try: from torch.hub import _get_torch_home a_ : Optional[Any] = _get_torch_home() except ImportError: a_ : List[Any] = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) a_ : Any = os.path.join(torch_cache_home, 'transformers') a_ : Any = 'https://cdn.huggingface.co' a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) a_ : Any = os.path.join(PATH, 'config.yaml') a_ : Any = os.path.join(PATH, 'attributes.txt') a_ : Any = os.path.join(PATH, 'objects.txt') a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) a_ : int = 'pytorch_model.bin' a_ : Union[str, Any] = 'config.yaml' def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ): __magic_name__ = [] with open(snake_case_ ) as f: for object in f.readlines(): vg_classes.append(object.split(''',''' )[0].lower().strip() ) __magic_name__ = [] with open(snake_case_ ) as f: for object in f.readlines(): vg_attrs.append(object.split(''',''' )[0].lower().strip() ) return vg_classes, vg_attrs def _SCREAMING_SNAKE_CASE ( snake_case_ : int ): __magic_name__ = OrderedDict() with open(snake_case_ , '''rb''' ) as f: __magic_name__ = pkl.load(snake_case_ )['''model'''] for k in copy.deepcopy(list(ckp.keys() ) ): __magic_name__ = ckp.pop(snake_case_ ) if isinstance(snake_case_ , np.ndarray ): __magic_name__ = torch.tensor(snake_case_ ) else: assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ ) __magic_name__ = v return r class SCREAMING_SNAKE_CASE_ : """simple docstring""" _a = {} def __init__( self , A , A = "root" , A=0 ) -> List[str]: '''simple docstring''' __magic_name__ = name __magic_name__ = level __magic_name__ = {} for k, v in dictionary.items(): if v is None: raise ValueError() __magic_name__ = copy.deepcopy(A ) __magic_name__ = copy.deepcopy(A ) if isinstance(A , A ): __magic_name__ = Config(A , name=A , level=level + 1 ) __magic_name__ = v setattr(self , A , A ) __magic_name__ = d def __repr__( self ) -> Union[str, Any]: '''simple docstring''' return str(list((self._pointer.keys()) ) ) def __setattr__( self , A , A ) -> Tuple: '''simple docstring''' __magic_name__ = val __magic_name__ = val __magic_name__ = key.split('''.''' ) __magic_name__ = len(A ) - 1 __magic_name__ = self._pointer if len(A ) > 1: for i, l in enumerate(A ): if hasattr(self , A ) and isinstance(getattr(self , A ) , A ): setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A ) if l == last_level: __magic_name__ = val else: __magic_name__ = pointer[l] def __A ( self ) -> List[Any]: '''simple docstring''' return self._pointer def __A ( self , A , A ) -> Any: '''simple 
docstring''' with open(F'{file_name}' , '''w''' ) as stream: dump(A , A ) def __A ( self , A , A ) -> List[Any]: '''simple docstring''' with open(F'{file_name}' , '''w''' ) as stream: json.dump(A , A ) @staticmethod def __A ( A ) -> Optional[Any]: '''simple docstring''' with open(A ) as stream: __magic_name__ = load(A , Loader=A ) return data def __str__( self ) -> List[Any]: '''simple docstring''' __magic_name__ = ''' ''' if self._name != "root": __magic_name__ = F'{t * (self._level-1)}{self._name}:\n' else: __magic_name__ = '''''' __magic_name__ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(A , A ): r += F'{t * (self._level)}{v}\n' self._level += 1 else: r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n' __magic_name__ = level return r[:-1] @classmethod def __A ( cls , A , **A ) -> int: '''simple docstring''' __magic_name__ , __magic_name__ = cls.get_config_dict(A , **A ) return cls(A ) @classmethod def __A ( cls , A , **A ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = kwargs.pop('''cache_dir''' , A ) __magic_name__ = kwargs.pop('''force_download''' , A ) __magic_name__ = kwargs.pop('''resume_download''' , A ) __magic_name__ = kwargs.pop('''proxies''' , A ) __magic_name__ = kwargs.pop('''local_files_only''' , A ) if os.path.isdir(A ): __magic_name__ = os.path.join(A , A ) elif os.path.isfile(A ) or is_remote_url(A ): __magic_name__ = pretrained_model_name_or_path else: __magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A ) try: # Load from URL or cache if already cached __magic_name__ = cached_path( A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , ) # Load config dict if resolved_config_file is None: raise EnvironmentError __magic_name__ = Config.load_yaml(A ) except EnvironmentError: __magic_name__ = '''Can\'t load config for''' raise EnvironmentError(A ) if resolved_config_file == config_file: print('''loading configuration file from path''' ) else: print('''loading configuration file cache''' ) return Config.load_yaml(A ), kwargs def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ): __magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device ) __magic_name__ = in_tensor.numpy() __magic_name__ = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), ( f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %' " element-wise mismatch" ) raise Exception('''tensors are all good''' ) # Hugging face functions below def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): __magic_name__ = urlparse(snake_case_ ) return parsed.scheme in ("http", "https") def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ): __magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX __magic_name__ = '''/''' not in model_id if legacy_format: return f'{endpoint}/{model_id}-{filename}' else: return f'{endpoint}/{model_id}/{filename}' def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ): __magic_name__ = '''python/{}'''.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(snake_case_ , snake_case_ ): ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() ) 
elif isinstance(snake_case_ , snake_case_ ): ua += "; " + user_agent __magic_name__ = {'''user-agent''': ua} if resume_size > 0: __magic_name__ = '''bytes=%d-''' % (resume_size,) __magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ ) if response.status_code == 416: # Range not satisfiable return __magic_name__ = response.headers.get('''Content-Length''' ) __magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None __magic_name__ = tqdm( unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(snake_case_ ) ) temp_file.write(snake_case_ ) progress.close() def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ): if cache_dir is None: __magic_name__ = TRANSFORMERS_CACHE if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) __magic_name__ = None if not local_files_only: try: __magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ ) if response.status_code == 200: __magic_name__ = response.headers.get('''ETag''' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass __magic_name__ = url_to_filename(snake_case_ , snake_case_ ) # get cache path to put the file __magic_name__ = os.path.join(snake_case_ , snake_case_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(snake_case_ ): return cache_path else: __magic_name__ = [ file for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' ) if not file.endswith('''.json''' ) and not file.endswith('''.lock''' ) ] if len(snake_case_ ) > 0: return os.path.join(snake_case_ , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( '''Cannot find the requested files in the cached path and outgoing traffic has been''' ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\'''' ''' to False.''' ) return None # From now on, etag is not None. if os.path.exists(snake_case_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. __magic_name__ = cache_path + '''.lock''' with FileLock(snake_case_ ): # If the download just completed while the lock was activated. if os.path.exists(snake_case_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: __magic_name__ = cache_path + '''.incomplete''' @contextmanager def _resumable_file_manager(): with open(snake_case_ , '''a+b''' ) as f: yield f __magic_name__ = _resumable_file_manager if os.path.exists(snake_case_ ): __magic_name__ = os.stat(snake_case_ ).st_size else: __magic_name__ = 0 else: __magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ ) __magic_name__ = 0 # Download to temporary file, then copy to cache dir once finished. 
# Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , ) http_get( snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , ) os.replace(temp_file.name , snake_case_ ) __magic_name__ = {'''url''': url, '''etag''': etag} __magic_name__ = cache_path + '''.json''' with open(snake_case_ , '''w''' ) as meta_file: json.dump(snake_case_ , snake_case_ ) return cache_path def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ): __magic_name__ = url.encode('''utf-8''' ) __magic_name__ = shaaaa(snake_case_ ) __magic_name__ = url_hash.hexdigest() if etag: __magic_name__ = etag.encode('''utf-8''' ) __magic_name__ = shaaaa(snake_case_ ) filename += "." + etag_hash.hexdigest() if url.endswith('''.h5''' ): filename += ".h5" return filename def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ): if cache_dir is None: __magic_name__ = TRANSFORMERS_CACHE if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) if is_remote_url(snake_case_ ): # URL, so get it from the cache (downloading if necessary) __magic_name__ = get_from_cache( snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , ) elif os.path.exists(snake_case_ ): # File, and it exists. __magic_name__ = url_or_filename elif urlparse(snake_case_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('''file {} not found'''.format(snake_case_ ) ) else: # Something unknown raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) ) if extract_compressed_file: if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" __magic_name__ , __magic_name__ = os.path.split(snake_case_ ) __magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted''' __magic_name__ = os.path.join(snake_case_ , snake_case_ ) if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions __magic_name__ = output_path + '''.lock''' with FileLock(snake_case_ ): shutil.rmtree(snake_case_ , ignore_errors=snake_case_ ) os.makedirs(snake_case_ ) if is_zipfile(snake_case_ ): with ZipFile(snake_case_ , '''r''' ) as zip_file: zip_file.extractall(snake_case_ ) zip_file.close() elif tarfile.is_tarfile(snake_case_ ): __magic_name__ = tarfile.open(snake_case_ ) tar_file.extractall(snake_case_ ) tar_file.close() else: raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) ) return output_path_extracted return output_path def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ): assert isinstance(snake_case_ , snake_case_ ) if os.path.isfile(snake_case_ ): with open(snake_case_ ) as f: __magic_name__ = eval(f.read() ) else: __magic_name__ = requests.get(snake_case_ ) try: __magic_name__ = requests.json() except Exception: __magic_name__ = req.content.decode() assert data is not None, "could not connect" try: __magic_name__ = eval(snake_case_ ) except Exception: __magic_name__ = data.split('''\n''' ) req.close() return data def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ): __magic_name__ = requests.get(snake_case_ ) __magic_name__ = np.array(Image.open(BytesIO(response.content ) ) ) return img def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ): __magic_name__ = url.split('''/''' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(snake_case_ ) with open(snake_case_ , '''rb''' ) as stream: __magic_name__ = pkl.load(snake_case_ ) __magic_name__ = weights.pop('''model''' ) __magic_name__ = {} for k, v in model.items(): __magic_name__ = torch.from_numpy(snake_case_ ) if "running_var" in k: __magic_name__ = torch.tensor([0] ) __magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' ) __magic_name__ = zero return new def _SCREAMING_SNAKE_CASE ( ): print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' ) def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ): assert isinstance(snake_case_ , snake_case_ ) if os.path.isfile(snake_case_ ): __magic_name__ = cva.imread(snake_case_ ) else: __magic_name__ = get_image_from_url(snake_case_ ) assert img is not None, f'could not connect to: {im}' __magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB ) if input_format == "RGB": __magic_name__ = img[:, :, ::-1] return img def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ): return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
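A minimal usage sketch for the Config class defined above, written as an assumption from its __init__ rather than a guaranteed API: plain keys become attributes, nested dictionaries become nested Config objects, and printing walks the levels with indentation.

config = Config({"model": {"hidden_size": 768}, "seed": 42})
assert config.seed == 42                 # top-level keys become attributes
assert config.model.hidden_size == 768   # nested dicts become nested Configs
print(config)                            # pretty-prints keys per nesting level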
678
0
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    # Power iteration: repeatedly apply the matrix and normalize to converge
    # on the dominant eigenvalue/eigenvector pair.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply the matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find the Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
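A small worked example, assuming only the function defined above: the matrix [[2, 1], [1, 2]] has eigenvalues 3 and 1, so the iteration should converge to 3 with an eigenvector proportional to (1, 1).

import numpy as np

matrix = np.array([[2.0, 1.0], [1.0, 2.0]])
eigen_value, eigen_vector = power_iteration(matrix, np.array([1.0, 0.0]))
assert abs(eigen_value - 3.0) < 1e-6
assert np.allclose(np.abs(eigen_vector), np.array([1.0, 1.0]) / np.sqrt(2.0), atol=1e-6)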
713
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ : Optional[int] = 16 a_ : int = 32 def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ): __magic_name__ = AutoTokenizer.from_pretrained(snake_case_ ) __magic_name__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __magic_name__ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. __magic_name__ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __magic_name__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : str ): model.eval() __magic_name__ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __magic_name__ , __magic_name__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] __magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __magic_name__ = metric.compute() return eval_metric["accuracy"] def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple ): # Initialize accelerator __magic_name__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config['''lr'''] __magic_name__ = int(config['''num_epochs'''] ) __magic_name__ = int(config['''seed'''] ) __magic_name__ = int(config['''batch_size'''] ) __magic_name__ = args.model_name_or_path set_seed(snake_case_ ) __magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __magic_name__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __magic_name__ = 1 __magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __magic_name__ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over __magic_name__ = 0 # We also need to keep track of the stating epoch so files are named properly __magic_name__ = 0 __magic_name__ = evaluate.load('''glue''' , '''mrpc''' ) __magic_name__ = num_epochs if args.partial_train_epoch is not None: __magic_name__ = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) __magic_name__ = args.resume_from_checkpoint.split('''epoch_''' )[1] __magic_name__ = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break __magic_name__ = int(snake_case_ ) + 1 __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) accelerator.print('''resumed checkpoint performance:''' , snake_case_ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f: __magic_name__ = json.load(snake_case_ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model __magic_name__ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.loss __magic_name__ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 __magic_name__ = f'epoch_{epoch}' __magic_name__ = os.path.join(args.output_dir , snake_case_ ) accelerator.save_state(snake_case_ ) __magic_name__ = evaluation_loop(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) __magic_name__ = accuracy __magic_name__ = lr_scheduler.get_lr()[0] __magic_name__ = optimizer.param_groups[0]['''lr'''] __magic_name__ = epoch __magic_name__ = overall_step accelerator.print(f'epoch {epoch}:' , snake_case_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def _SCREAMING_SNAKE_CASE ( ): __magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=snake_case_ , default=snake_case_ , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--partial_train_epoch''' , type=snake_case_ , default=snake_case_ , help='''If passed, the training will stop after this number of epochs.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=2 , help='''Number of train epochs.''' , ) __magic_name__ = parser.parse_args() __magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
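The checkpoint-folder parsing in the resume branch above is easy to spot-check in isolation; this hedged re-statement (folder name hypothetical) shows how the epoch number is recovered and bumped by one:

resume_path = "outputs/epoch_7"  # hypothetical checkpoint folder name
epoch_string = resume_path.split("epoch_")[1]
digits = ""
for char in epoch_string:
    if char.isdigit():
        digits += char
    else:
        break
starting_epoch = int(digits) + 1
assert starting_epoch == 8  # training resumes with the epoch after the saved one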
678
0
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    # Accepts the prefixes 0, 94, +94 or 0094, a mobile code 70-72/74-78,
    # an optional single dash or space, then seven digits.
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"
        r"7(0|1|2|4|5|6|7|8)"
        r"(-| |)"
        r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
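A few spot checks, derived directly from the regular expression above:

assert is_sri_lankan_phone_number("+94773283048")
assert is_sri_lankan_phone_number("0718382399")
assert not is_sri_lankan_phone_number("0753283048x")  # rejected by the $ anchor
assert not is_sri_lankan_phone_number("0739282399")   # 3 is not a valid mobile-code digit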
714
def reverse_long_words(sentence: str) -> str:
    # Reverse every word longer than four characters; shorter words pass through.
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
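Two worked examples for the function above; only words longer than four characters are reversed.

assert reverse_long_words("Hey wollef sroirraw") == "Hey fellow warriors"
assert reverse_long_words("one four") == "one four"  # nothing exceeds four characters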
678
0
def multiplicative_persistence(num: int) -> int:
    # Count how many times the digits must be multiplied together
    # before a single digit remains.
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    # Count how many times the digits must be summed before a single digit remains.
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
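A worked example for both functions: 39 -> 27 -> 14 -> 4 under digit products (three steps), and 39 -> 12 -> 3 under digit sums (two steps).

assert multiplicative_persistence(39) == 3  # 3*9=27, 2*7=14, 1*4=4
assert additive_persistence(39) == 2        # 3+9=12, 1+2=3
assert multiplicative_persistence(7) == 0   # single digits take no steps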
715
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets a_ : Any = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n' a_ : int = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n' a_ : List[str] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE_ ( datasets.Metric ): """simple docstring""" def __A ( self ) -> List[Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] , ) def __A ( self , A , A , A=None , A=None , A=None , A=None , A="auto" , A=-1 , A=0.9 , A=5 , A=5_00 , A="gpt2-large" , A=-1 , A=10_24 , A=25 , A=5 , A=True , A=25 , ) -> Optional[int]: '''simple docstring''' __magic_name__ = compute_mauve( p_text=A , q_text=A , p_features=A , q_features=A , p_tokens=A , q_tokens=A , num_buckets=A , pca_max_data=A , kmeans_explained_var=A , kmeans_num_redo=A , kmeans_max_iter=A , featurize_model_name=A , device_id=A , max_text_length=A , divergence_curve_discretization_size=A , mauve_scaling_factor=A , verbose=A , seed=A , ) return out
678
0
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def _SCREAMING_SNAKE_CASE ( snake_case_ : Namespace ): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) a_ : Union[str, Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n' class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" @staticmethod def __A ( A ) -> List[Any]: '''simple docstring''' __magic_name__ = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=A , required=A , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=A , required=A , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=A , required=A , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=A , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=A , default=A , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=A ) def __init__( self , A , A , A , A , A , *A , ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(F'Loading model {model_type}' ) __magic_name__ = model_type __magic_name__ = tf_checkpoint __magic_name__ = pytorch_dump_output __magic_name__ = config __magic_name__ = finetuning_task_name def __A ( self ) -> List[Any]: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(A ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except 
ImportError: raise ImportError(A ) if "ckpt" in self._tf_checkpoint.lower(): __magic_name__ = self._tf_checkpoint __magic_name__ = '''''' else: __magic_name__ = self._tf_checkpoint __magic_name__ = '''''' convert_transfo_xl_checkpoint_to_pytorch( A , self._config , self._pytorch_dump_output , A ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(A ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
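An example invocation of the command registered above, grounded in its own argparse options (all paths are placeholders):

transformers-cli convert --model_type bert \
    --tf_checkpoint ./bert_model.ckpt \
    --config ./bert_config.json \
    --pytorch_dump_output ./pytorch_model.bin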
716
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    # NumPy implementation: square root of the summed squared differences.
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    # Pure-Python implementation of the same distance.
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10_000,
                globals=globals(),
            )
        )

    benchmark()
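A quick agreement check between the two implementations: the 3-4-5 right triangle gives a distance of exactly 5.

assert euclidean_distance((0, 0), (3, 4)) == 5.0
assert euclidean_distance_no_np((0, 0), (3, 4)) == 5.0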
678
0
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler a_ : Tuple = 16 a_ : List[Any] = 32 def _SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ): __magic_name__ = AutoTokenizer.from_pretrained(snake_case_ ) __magic_name__ = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case_ : List[Any] ): # max_length=None => use the model max length (it's actually the default) __magic_name__ = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case_ , max_length=snake_case_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __magic_name__ = datasets.map( snake_case_ , batched=snake_case_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=snake_case_ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __magic_name__ = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case_ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(snake_case_ , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__magic_name__ = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) __magic_name__ = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ ) return train_dataloader, eval_dataloader def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Optional[int] ): # Initialize accelerator __magic_name__ = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __magic_name__ = config['''lr'''] __magic_name__ = int(config['''num_epochs'''] ) __magic_name__ = int(config['''seed'''] ) __magic_name__ = int(config['''batch_size'''] ) __magic_name__ = args.model_name_or_path set_seed(snake_case_ ) __magic_name__ , __magic_name__ = get_dataloaders(snake_case_ , snake_case_ , snake_case_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __magic_name__ = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ ) # Instantiate optimizer __magic_name__ = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __magic_name__ = optimizer_cls(params=model.parameters() , lr=snake_case_ ) if accelerator.state.deepspeed_plugin is not None: __magic_name__ = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __magic_name__ = 1 __magic_name__ = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __magic_name__ = get_linear_schedule_with_warmup( optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , ) else: __magic_name__ = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) # We need to keep track of how many total steps we have iterated over __magic_name__ = 0 # We also need to keep track of the stating epoch so files are named properly __magic_name__ = 0 # Now we train the model __magic_name__ = evaluate.load('''glue''' , '''mrpc''' ) __magic_name__ = 0 __magic_name__ = {} for epoch in range(snake_case_ , snake_case_ ): model.train() for step, batch in enumerate(snake_case_ ): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.loss __magic_name__ = loss / gradient_accumulation_steps accelerator.backward(snake_case_ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __magic_name__ = 0 for step, batch in enumerate(snake_case_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): __magic_name__ = model(**snake_case_ ) __magic_name__ = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __magic_name__ , __magic_name__ = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(snake_case_ ) - 1: __magic_name__ = predictions[: len(eval_dataloader.dataset ) - samples_seen] __magic_name__ = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=snake_case_ , references=snake_case_ , ) __magic_name__ = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , snake_case_ ) __magic_name__ = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: __magic_name__ = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(snake_case_ , snake_case_ ) def _SCREAMING_SNAKE_CASE ( ): __magic_name__ = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=snake_case_ , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case_ , ) parser.add_argument( '''--output_dir''' , type=snake_case_ , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=snake_case_ , default=snake_case_ , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=snake_case_ , default=3 , help='''Number of train epochs.''' , ) __magic_name__ = parser.parse_args() __magic_name__ = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(snake_case_ , snake_case_ ) if __name__ == "__main__": main()
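The duplicate-trimming inside the evaluation loop above deserves an isolated illustration. Distributed samplers pad the dataset so every process sees whole batches, and the padding surfaces as extra rows at the tail of the gathered predictions; this self-contained sketch (sizes hypothetical) mirrors the slicing rule predictions[: len(dataset) - samples_seen]:

import numpy as np

dataset_len = 10            # hypothetical: 10 real evaluation examples
gathered = np.arange(12)    # 2 processes x 6 rows -> 2 padded duplicates at the end
samples_seen = 8            # rows already accounted for by earlier batches
last_batch = gathered[samples_seen:]
trimmed = last_batch[: dataset_len - samples_seen]  # keep 2 of the 4 tail rows
assert trimmed.tolist() == [8, 9]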
717
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() a_ : str = logging.get_logger(__name__) a_ : Union[str, Any] = 'https://openaipublic.azureedge.net/jukebox/models/' a_ : List[Any] = { 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ): if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: __magic_name__ = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: __magic_name__ = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: __magic_name__ = key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: __magic_name__ = key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: __magic_name__ = key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ): __magic_name__ = {} import re __magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) __magic_name__ = re.compile( r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) __magic_name__ = re.compile( r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) __magic_name__ = re.compile( r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) __magic_name__ = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(snake_case_ ): __magic_name__ = re_encoder_block_conv_in.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) __magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}' __magic_name__ = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ ) elif re_encoder_block_resnet.fullmatch(snake_case_ ): __magic_name__ = re_encoder_block_resnet.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) __magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]] __magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.' 
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' __magic_name__ = prefix + resnet_block __magic_name__ = re_encoder_block_resnet.sub(snake_case_ , snake_case_ ) elif re_encoder_block_proj_out.fullmatch(snake_case_ ): __magic_name__ = re_encoder_block_proj_out.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}' __magic_name__ = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(snake_case_ ): __magic_name__ = re_decoder_block_conv_out.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 __magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}' __magic_name__ = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ ) elif re_decoder_block_resnet.fullmatch(snake_case_ ): __magic_name__ = re_decoder_block_resnet.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[2] ) * 2 + int(groups[3] ) - 2 __magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]] __magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.' __magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' __magic_name__ = prefix + resnet_block __magic_name__ = re_decoder_block_resnet.sub(snake_case_ , snake_case_ ) elif re_decoder_block_proj_in.fullmatch(snake_case_ ): __magic_name__ = re_decoder_block_proj_in.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}' __magic_name__ = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(snake_case_ ): __magic_name__ = re_prior_cond_conv_out.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 __magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}' __magic_name__ = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ ) elif re_prior_cond_resnet.fullmatch(snake_case_ ): __magic_name__ = re_prior_cond_resnet.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = int(groups[1] ) * 2 + int(groups[2] ) - 2 __magic_name__ = {'''1''': 1, '''3''': 2}[groups[-2]] __magic_name__ = f'conditioner_blocks.upsampler.upsample_block.{block_index}.' 
__magic_name__ = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}' __magic_name__ = prefix + resnet_block __magic_name__ = re_prior_cond_resnet.sub(snake_case_ , snake_case_ ) elif re_prior_cond_proj_in.fullmatch(snake_case_ ): __magic_name__ = re_prior_cond_proj_in.match(snake_case_ ) __magic_name__ = regex_match.groups() __magic_name__ = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}' __magic_name__ = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ ) # keep original key else: __magic_name__ = original_key __magic_name__ = replace_key(snake_case_ ) if f'{key_prefix}.{key}' not in model_state_dict or key is None: print(f'failed converting {original_key} to {key}, does not match' ) # handle missmatched shape elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape: __magic_name__ = model_state_dict[f'{key_prefix}.{key}'] print(f'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' ) __magic_name__ = original_key __magic_name__ = original_key __magic_name__ = value return new_dict @torch.no_grad() def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict=None , snake_case_ : Any=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ): __magic_name__ = requests.get(f'{PREFIX}{file}' , allow_redirects=snake_case_ ) os.makedirs(f'{pytorch_dump_folder_path}/' , exist_ok=snake_case_ ) open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , '''wb''' ).write(r.content ) __magic_name__ = MODEL_MAPPING[model_name.split('''/''' )[-1]] __magic_name__ = JukeboxConfig.from_pretrained(snake_case_ ) __magic_name__ = JukeboxModel(snake_case_ ) __magic_name__ = [] __magic_name__ = {} for i, dict_name in enumerate(snake_case_ ): __magic_name__ = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['''model'''] __magic_name__ = {} for k in old_dic.keys(): if k.endswith('''.b''' ): __magic_name__ = old_dic[k] elif k.endswith('''.w''' ): __magic_name__ = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: __magic_name__ = old_dic[k] else: __magic_name__ = old_dic[k] __magic_name__ = '''vqvae''' if i == 0 else f'priors.{3 - i}' __magic_name__ = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ ) weight_dict.append(snake_case_ ) __magic_name__ = weight_dict.pop(0 ) model.vqvae.load_state_dict(snake_case_ ) for i in range(len(snake_case_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) with open(f'{pytorch_dump_folder_path}/mapping.json' , '''w''' ) as txtfile: json.dump(snake_case_ , snake_case_ ) print(f'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case_ ) return weight_dict if __name__ == "__main__": a_ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) a_ : int = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
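One of the rename rules above, restated as a self-contained check; the helper below mirrors the '.model.1.bias' rule rather than calling the original function, whose name was mangled in this dump, and the sample key is illustrative:

def rename_conv_bias(key: str) -> str:
    # Mirrors the first mapping rule: deep '.model.1.bias' keys become '.conv1d_1.bias'.
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        return key.replace(".model.1.bias", ".conv1d_1.bias")
    return key


sample_key = "vqvae.encoders.0.level_blocks.0.model.0.0.model.1.model.1.bias"
assert rename_conv_bias(sample_key).endswith(".conv1d_1.bias")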
678
0
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
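Tracing the greedy rule on the sample data: activity 0 finishes at 2, activity 1 starts at 3 >= 2, activity 3 starts at 5 >= 4, and activity 4 starts at 8 >= 7, so the selected indices are 0, 1, 3, 4. The return-a-list variant below restates the same rule (it assumes, as the greedy argument requires, that finish times are sorted).

def selected_activities(start: list[int], finish: list[int]) -> list[int]:
    # Same greedy rule as above, returning indices instead of printing them.
    chosen = [0]
    for j in range(1, len(finish)):
        if start[j] >= finish[chosen[-1]]:
            chosen.append(j)
    return chosen


assert selected_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]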
718
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ : int = logging.get_logger(__name__) a_ : Optional[int] = { 'microsoft/table-transformer-detection': ( 'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json' ), } class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = """table-transformer""" _a = ["""past_key_values"""] _a = { """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", } def __init__( self , A=True , A=None , A=3 , A=1_00 , A=6 , A=20_48 , A=8 , A=6 , A=20_48 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=2_56 , A=0.1 , A=0.0 , A=0.0 , A=0.02 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> Any: '''simple docstring''' if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) __magic_name__ = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(A , A ): __magic_name__ = backbone_config.get('''model_type''' ) __magic_name__ = CONFIG_MAPPING[backbone_model_type] __magic_name__ = config_class.from_dict(A ) # set timm attributes to None __magic_name__ , __magic_name__ , __magic_name__ = None, None, None __magic_name__ = use_timm_backbone __magic_name__ = backbone_config __magic_name__ = num_channels __magic_name__ = num_queries __magic_name__ = d_model __magic_name__ = encoder_ffn_dim __magic_name__ = encoder_layers __magic_name__ = encoder_attention_heads __magic_name__ = decoder_ffn_dim __magic_name__ = decoder_layers __magic_name__ = decoder_attention_heads __magic_name__ = dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = activation_function __magic_name__ = init_std __magic_name__ = init_xavier_std __magic_name__ = encoder_layerdrop __magic_name__ = decoder_layerdrop __magic_name__ = encoder_layers __magic_name__ = auxiliary_loss __magic_name__ = position_embedding_type __magic_name__ = backbone __magic_name__ = use_pretrained_backbone __magic_name__ = dilation # Hungarian matcher __magic_name__ = class_cost __magic_name__ = bbox_cost __magic_name__ = giou_cost # Loss coefficients __magic_name__ = mask_loss_coefficient __magic_name__ = dice_loss_coefficient __magic_name__ = bbox_loss_coefficient __magic_name__ = giou_loss_coefficient __magic_name__ = eos_coefficient super().__init__(is_encoder_decoder=A , **A ) @property def __A ( self ) -> int: '''simple docstring''' return self.encoder_attention_heads @property def __A ( self ) -> int: '''simple docstring''' return self.d_model class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" _a = version.parse("""1.11""" ) @property def __A ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __A ( self ) -> float: '''simple docstring''' return 1E-5 @property def __A ( self ) -> int: '''simple docstring''' return 12
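A hedged instantiation sketch, assuming the published class name TableTransformerConfig in transformers (the class name in this dump is mangled): the defaults in the signature above imply d_model 256 and six encoder layers, and the attribute map aliases num_attention_heads to encoder_attention_heads.

from transformers import TableTransformerConfig

config = TableTransformerConfig()  # defaults mirror the signature above
assert config.d_model == 256
assert config.encoder_layers == 6
assert config.num_attention_heads == config.encoder_attention_heads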
678
0
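The activity-selection row above only prints its result, which makes it awkward to test. Below is a minimal sketch of the same greedy rule that returns the chosen indices instead; the helper name `select_max_activities` is illustrative and not part of the dataset row.

```python
def select_max_activities(start: list[int], finish: list[int]) -> list[int]:
    # Assumes activities are pre-sorted by finish time, as in the row above.
    selected = [0]  # the first activity is always selected
    last = 0        # index of the most recently selected activity
    for j in range(1, len(finish)):
        # Greedy rule: take the activity if it starts no earlier than the
        # finish time of the last selected one.
        if start[j] >= finish[last]:
            selected.append(j)
            last = j
    return selected


assert select_max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]
```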
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class SCREAMING_SNAKE_CASE_ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self , *A , A=None , A=None , **A ) -> int: '''simple docstring''' super().__init__(*A , **A ) __magic_name__ = eval_examples __magic_name__ = post_process_function def __A ( self , A=None , A=None , A=None , A = "eval" ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.eval_dataset if eval_dataset is None else eval_dataset __magic_name__ = self.get_eval_dataloader(A ) __magic_name__ = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __magic_name__ = self.compute_metrics __magic_name__ = None __magic_name__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __magic_name__ = time.time() try: __magic_name__ = eval_loop( A , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , ) finally: __magic_name__ = compute_metrics __magic_name__ = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default __magic_name__ = self.post_process_function(A , A , output.predictions ) __magic_name__ = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): __magic_name__ = metrics.pop(A ) metrics.update(output.metrics ) else: __magic_name__ = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(A ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) __magic_name__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , A ) return metrics def __A ( self , A , A , A=None , A = "test" ) -> Optional[int]: '''simple docstring''' __magic_name__ = self.get_test_dataloader(A ) # Temporarily disable metric computation, we will do it in the loop here. 
__magic_name__ = self.compute_metrics __magic_name__ = None __magic_name__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop __magic_name__ = time.time() try: __magic_name__ = eval_loop( A , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , ) finally: __magic_name__ = compute_metrics __magic_name__ = self.args.eval_batch_size * self.args.world_size if F'{metric_key_prefix}_jit_compilation_time' in output.metrics: start_time += output.metrics[F'{metric_key_prefix}_jit_compilation_time'] output.metrics.update( speed_metrics( A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output __magic_name__ = self.post_process_function(A , A , output.predictions , '''predict''' ) __magic_name__ = self.compute_metrics(A ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'{metric_key_prefix}_' ): __magic_name__ = metrics.pop(A ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
719
import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] ): # Initialise PyTorch model __magic_name__ = LxmertConfig.from_json_file(snake_case_ ) print(f'Building PyTorch model from configuration: {config}' ) __magic_name__ = LxmertForPreTraining(snake_case_ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(snake_case_ , snake_case_ , snake_case_ ) # Save pytorch-model print(f'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case_ ) if __name__ == "__main__": a_ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) a_ : Optional[Any] = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
678
0
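The evaluation and prediction overrides above share one pattern worth isolating: `compute_metrics` is stashed and set to `None` so the inner loop skips metric computation, and a `try`/`finally` guarantees it is restored. A dependency-free sketch of that pattern follows; the `Evaluator` class is a hypothetical stand-in for the Trainer subclass, not part of the row.

```python
class Evaluator:
    def __init__(self, compute_metrics):
        self.compute_metrics = compute_metrics

    def evaluate(self, predictions):
        compute_metrics = self.compute_metrics
        self.compute_metrics = None  # the prediction loop must not compute metrics
        try:
            pass  # ... run the prediction loop here ...
        finally:
            self.compute_metrics = compute_metrics  # restored even on error
        return self.compute_metrics(predictions)


evaluator = Evaluator(lambda preds: {"accuracy": sum(preds) / len(preds)})
print(evaluator.evaluate([1, 0, 1, 1]))  # {'accuracy': 0.75}
```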
'''simple docstring''' import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def __A ( self , A , A ) -> List[Any]: '''simple docstring''' __magic_name__ = jnp.ones((batch_size, length) ) / length return scores def __A ( self ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = None __magic_name__ = 20 __magic_name__ = self._get_uniform_logits(batch_size=2 , length=A ) # tweak scores to not be uniform anymore __magic_name__ = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch __magic_name__ = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax __magic_name__ = jax.nn.softmax(A , axis=-1 ) __magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) __magic_name__ = FlaxTemperatureLogitsWarper(temperature=1.3 ) __magic_name__ = jax.nn.softmax(temp_dist_warper_sharper(A , scores.copy() , cur_len=A ) , axis=-1 ) __magic_name__ = jax.nn.softmax(temp_dist_warper_smoother(A , scores.copy() , cur_len=A ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def __A ( self ) -> int: '''simple docstring''' __magic_name__ = None __magic_name__ = 10 __magic_name__ = 2 # create ramp distribution __magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() __magic_name__ = ramp_logits[1:, : vocab_size // 2] + vocab_size __magic_name__ = FlaxTopKLogitsWarper(3 ) __magic_name__ = top_k_warp(A , A , cur_len=A ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case __magic_name__ = 5 __magic_name__ = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) __magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, length) ).copy() __magic_name__ = top_k_warp_safety_check(A , A , cur_len=A ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = None __magic_name__ = 10 __magic_name__ = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) __magic_name__ = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) __magic_name__ = FlaxTopPLogitsWarper(0.8 ) __magic_name__ = np.exp(top_p_warp(A , A , cur_len=A ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 
__magic_name__ = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(A , A , atol=1E-3 ) ) # check edge cases with negative and extreme logits __magic_name__ = np.broadcast_to(np.arange(A )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme __magic_name__ = ramp_logits[1] * 1_00.0 # make sure at least 2 tokens are kept __magic_name__ = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) __magic_name__ = top_p_warp(A , A , cur_len=A ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def __A ( self ) -> int: '''simple docstring''' __magic_name__ = 20 __magic_name__ = 4 __magic_name__ = 0 __magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A ) # check that min length is applied at length 5 __magic_name__ = ids_tensor((batch_size, 20) , vocab_size=20 ) __magic_name__ = 5 __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = min_dist_processor(A , A , cur_len=A ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] ) # check that min length is not applied anymore at length 15 __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = 15 __magic_name__ = min_dist_processor(A , A , cur_len=A ) self.assertFalse(jnp.isinf(A ).any() ) def __A ( self ) -> Any: '''simple docstring''' __magic_name__ = 20 __magic_name__ = 4 __magic_name__ = 0 __magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A ) # check that all scores are -inf except the bos_token_id score __magic_name__ = ids_tensor((batch_size, 1) , vocab_size=20 ) __magic_name__ = 1 __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = logits_processor(A , A , cur_len=A ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 __magic_name__ = 3 __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = logits_processor(A , A , cur_len=A ) self.assertFalse(jnp.isinf(A ).any() ) def __A ( self ) -> List[Any]: '''simple docstring''' __magic_name__ = 20 __magic_name__ = 4 __magic_name__ = 0 __magic_name__ = 5 __magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A ) # check that all scores are -inf except the eos_token_id when max_length is reached __magic_name__ = ids_tensor((batch_size, 4) , vocab_size=20 ) __magic_name__ = 4 __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = logits_processor(A , A , cur_len=A ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached __magic_name__ = 3 __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = logits_processor(A , A , cur_len=A ) self.assertFalse(jnp.isinf(A ).any() ) def __A ( self ) -> Dict: '''simple docstring''' __magic_name__ = 4 __magic_name__ = 10 __magic_name__ = 15 __magic_name__ = 2 __magic_name__ = 1 __magic_name__ = 15 # dummy input_ids and scores __magic_name__ = ids_tensor((batch_size, sequence_length) , A ) __magic_name__ = input_ids.copy() __magic_name__ = self._get_uniform_logits(A , A ) 
__magic_name__ = scores.copy() # instantiate all dist processors __magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) __magic_name__ = FlaxTopKLogitsWarper(3 ) __magic_name__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors __magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A ) __magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A ) __magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A ) __magic_name__ = 10 # no processor list __magic_name__ = temp_dist_warp(A , A , cur_len=A ) __magic_name__ = top_k_warp(A , A , cur_len=A ) __magic_name__ = top_p_warp(A , A , cur_len=A ) __magic_name__ = min_dist_proc(A , A , cur_len=A ) __magic_name__ = bos_dist_proc(A , A , cur_len=A ) __magic_name__ = eos_dist_proc(A , A , cur_len=A ) # with processor list __magic_name__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) __magic_name__ = processor(A , A , cur_len=A ) # scores should be equal self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def __A ( self ) -> List[str]: '''simple docstring''' __magic_name__ = 4 __magic_name__ = 10 __magic_name__ = 15 __magic_name__ = 2 __magic_name__ = 1 __magic_name__ = 15 # dummy input_ids and scores __magic_name__ = ids_tensor((batch_size, sequence_length) , A ) __magic_name__ = input_ids.copy() __magic_name__ = self._get_uniform_logits(A , A ) __magic_name__ = scores.copy() # instantiate all dist processors __magic_name__ = FlaxTemperatureLogitsWarper(temperature=0.5 ) __magic_name__ = FlaxTopKLogitsWarper(3 ) __magic_name__ = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors __magic_name__ = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=A ) __magic_name__ = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=A ) __magic_name__ = FlaxForcedEOSTokenLogitsProcessor(max_length=A , eos_token_id=A ) __magic_name__ = 10 # no processor list def run_no_processor_list(A , A , A ): __magic_name__ = temp_dist_warp(A , A , cur_len=A ) __magic_name__ = top_k_warp(A , A , cur_len=A ) __magic_name__ = top_p_warp(A , A , cur_len=A ) __magic_name__ = min_dist_proc(A , A , cur_len=A ) __magic_name__ = bos_dist_proc(A , A , cur_len=A ) __magic_name__ = eos_dist_proc(A , A , cur_len=A ) return scores # with processor list def run_processor_list(A , A , A ): __magic_name__ = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) __magic_name__ = processor(A , A , cur_len=A ) return scores __magic_name__ = jax.jit(A ) __magic_name__ = jax.jit(A ) __magic_name__ = jitted_run_no_processor_list(A , A , A ) __magic_name__ = jitted_run_processor_list(A , A , A ) # scores should be equal self.assertTrue(jnp.allclose(A , A , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
720
# Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ): __magic_name__ = { '''en''': '''Machine learning is great, isn\'t it?''', '''ru''': '''Машинное обучение - это здорово, не так ли?''', '''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''', } # BLUE scores as follows: # "pair": [fairseq, transformers] __magic_name__ = { '''wmt16-en-de-dist-12-1''': [28.3, 27.52], '''wmt16-en-de-dist-6-1''': [27.4, 27.11], '''wmt16-en-de-12-1''': [26.9, 25.75], } __magic_name__ = f'{src_lang}-{tgt_lang}' __magic_name__ = f'\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "allenai/{model_name}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. 
`transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n' model_card_dir.mkdir(parents=snake_case_ , exist_ok=snake_case_ ) __magic_name__ = os.path.join(snake_case_ , '''README.md''' ) print(f'Generating {path}' ) with open(snake_case_ , '''w''' , encoding='''utf-8''' ) as f: f.write(snake_case_ ) # make sure we are under the root of the project a_ : Tuple = Path(__file__).resolve().parent.parent.parent a_ : Dict = repo_dir / 'model_cards' for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: a_ : List[str] = model_cards_dir / 'allenai' / model_name write_model_card(model_card_dir, src_lang='en', tgt_lang='de', model_name=model_name)
678
0
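Grounded in the tests above, here is a short sketch of how the individual warpers are chained in practice with `FlaxLogitsProcessorList`. The call signature `(input_ids, scores, cur_len)` is the one the tests themselves use; the dummy prompt and logits are made up.

```python
import jax.numpy as jnp
from transformers.generation import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList(
    [FlaxTemperatureLogitsWarper(temperature=0.7), FlaxTopKLogitsWarper(top_k=2)]
)
input_ids = jnp.zeros((1, 4), dtype=jnp.int32)  # dummy prompt
scores = jnp.array([[0.1, 0.4, 0.2, 0.3]])      # dummy next-token logits
warped = processors(input_ids, scores, cur_len=4)
# Only the top-2 logits survive; the rest are pushed to -inf.
print(jnp.isinf(warped).sum())  # 2 filtered entries
```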
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch a_ : str = True except ImportError: a_ : Optional[int] = False try: from torch.hub import _get_torch_home a_ : Optional[Any] = _get_torch_home() except ImportError: a_ : List[Any] = os.path.expanduser( os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')) ) a_ : Any = os.path.join(torch_cache_home, 'transformers') a_ : Any = 'https://cdn.huggingface.co' a_ : Any = 'https://s3.amazonaws.com/models.huggingface.co/bert' a_ : int = '/'.join(str(Path(__file__).resolve()).split('/')[:-1]) a_ : Any = os.path.join(PATH, 'config.yaml') a_ : Any = os.path.join(PATH, 'attributes.txt') a_ : Any = os.path.join(PATH, 'objects.txt') a_ : List[Any] = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path) a_ : Any = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE) a_ : Optional[int] = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE) a_ : int = 'pytorch_model.bin' a_ : Union[str, Any] = 'config.yaml' def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any]=OBJECTS , snake_case_ : str=ATTRIBUTES ): __magic_name__ = [] with open(snake_case_ ) as f: for object in f.readlines(): vg_classes.append(object.split(''',''' )[0].lower().strip() ) __magic_name__ = [] with open(snake_case_ ) as f: for object in f.readlines(): vg_attrs.append(object.split(''',''' )[0].lower().strip() ) return vg_classes, vg_attrs def _SCREAMING_SNAKE_CASE ( snake_case_ : int ): __magic_name__ = OrderedDict() with open(snake_case_ , '''rb''' ) as f: __magic_name__ = pkl.load(snake_case_ )['''model'''] for k in copy.deepcopy(list(ckp.keys() ) ): __magic_name__ = ckp.pop(snake_case_ ) if isinstance(snake_case_ , np.ndarray ): __magic_name__ = torch.tensor(snake_case_ ) else: assert isinstance(snake_case_ , torch.tensor ), type(snake_case_ ) __magic_name__ = v return r class SCREAMING_SNAKE_CASE_ : """simple docstring""" _a = {} def __init__( self , A , A = "root" , A=0 ) -> List[str]: '''simple docstring''' __magic_name__ = name __magic_name__ = level __magic_name__ = {} for k, v in dictionary.items(): if v is None: raise ValueError() __magic_name__ = copy.deepcopy(A ) __magic_name__ = copy.deepcopy(A ) if isinstance(A , A ): __magic_name__ = Config(A , name=A , level=level + 1 ) __magic_name__ = v setattr(self , A , A ) __magic_name__ = d def __repr__( self ) -> Union[str, Any]: '''simple docstring''' return str(list((self._pointer.keys()) ) ) def __setattr__( self , A , A ) -> Tuple: '''simple docstring''' __magic_name__ = val __magic_name__ = val __magic_name__ = key.split('''.''' ) __magic_name__ = len(A ) - 1 __magic_name__ = self._pointer if len(A ) > 1: for i, l in enumerate(A ): if hasattr(self , A ) and isinstance(getattr(self , A ) , A ): setattr(getattr(self , A ) , '''.'''.join(levels[i:] ) , A ) if l == last_level: __magic_name__ = val else: __magic_name__ = pointer[l] def __A ( self ) -> List[Any]: '''simple docstring''' return self._pointer def __A ( self , A , A ) -> Any: '''simple 
docstring''' with open(F'{file_name}' , '''w''' ) as stream: dump(A , A ) def __A ( self , A , A ) -> List[Any]: '''simple docstring''' with open(F'{file_name}' , '''w''' ) as stream: json.dump(A , A ) @staticmethod def __A ( A ) -> Optional[Any]: '''simple docstring''' with open(A ) as stream: __magic_name__ = load(A , Loader=A ) return data def __str__( self ) -> List[Any]: '''simple docstring''' __magic_name__ = ''' ''' if self._name != "root": __magic_name__ = F'{t * (self._level-1)}{self._name}:\n' else: __magic_name__ = '''''' __magic_name__ = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(A , A ): r += F'{t * (self._level)}{v}\n' self._level += 1 else: r += F'{t * (self._level)}{k}: {v} ({type(A ).__name__})\n' __magic_name__ = level return r[:-1] @classmethod def __A ( cls , A , **A ) -> int: '''simple docstring''' __magic_name__ , __magic_name__ = cls.get_config_dict(A , **A ) return cls(A ) @classmethod def __A ( cls , A , **A ) -> Union[str, Any]: '''simple docstring''' __magic_name__ = kwargs.pop('''cache_dir''' , A ) __magic_name__ = kwargs.pop('''force_download''' , A ) __magic_name__ = kwargs.pop('''resume_download''' , A ) __magic_name__ = kwargs.pop('''proxies''' , A ) __magic_name__ = kwargs.pop('''local_files_only''' , A ) if os.path.isdir(A ): __magic_name__ = os.path.join(A , A ) elif os.path.isfile(A ) or is_remote_url(A ): __magic_name__ = pretrained_model_name_or_path else: __magic_name__ = hf_bucket_url(A , filename=A , use_cdn=A ) try: # Load from URL or cache if already cached __magic_name__ = cached_path( A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , ) # Load config dict if resolved_config_file is None: raise EnvironmentError __magic_name__ = Config.load_yaml(A ) except EnvironmentError: __magic_name__ = '''Can\'t load config for''' raise EnvironmentError(A ) if resolved_config_file == config_file: print('''loading configuration file from path''' ) else: print('''loading configuration file cache''' ) return Config.load_yaml(A ), kwargs def _SCREAMING_SNAKE_CASE ( snake_case_ : Tuple ): __magic_name__ = torch.load('''dump.pt''' , map_location=in_tensor.device ) __magic_name__ = in_tensor.numpy() __magic_name__ = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ), ( f'{sum([1 for x in np.isclose(snake_case_ , snake_case_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %' " element-wise mismatch" ) raise Exception('''tensors are all good''' ) # Hugging face functions below def _SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): __magic_name__ = urlparse(snake_case_ ) return parsed.scheme in ("http", "https") def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : Optional[Any]=True ): __magic_name__ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX __magic_name__ = '''/''' not in model_id if legacy_format: return f'{endpoint}/{model_id}-{filename}' else: return f'{endpoint}/{model_id}/{filename}' def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple , snake_case_ : List[str]=None , snake_case_ : Dict=0 , snake_case_ : Tuple=None , ): __magic_name__ = '''python/{}'''.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(snake_case_ , snake_case_ ): ua += "; " + "; ".join('''{}/{}'''.format(snake_case_ , snake_case_ ) for k, v in user_agent.items() ) 
elif isinstance(snake_case_ , snake_case_ ): ua += "; " + user_agent __magic_name__ = {'''user-agent''': ua} if resume_size > 0: __magic_name__ = '''bytes=%d-''' % (resume_size,) __magic_name__ = requests.get(snake_case_ , stream=snake_case_ , proxies=snake_case_ , headers=snake_case_ ) if response.status_code == 416: # Range not satisfiable return __magic_name__ = response.headers.get('''Content-Length''' ) __magic_name__ = resume_size + int(snake_case_ ) if content_length is not None else None __magic_name__ = tqdm( unit='''B''' , unit_scale=snake_case_ , total=snake_case_ , initial=snake_case_ , desc='''Downloading''' , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(snake_case_ ) ) temp_file.write(snake_case_ ) progress.close() def _SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict=None , snake_case_ : int=False , snake_case_ : List[Any]=None , snake_case_ : Tuple=10 , snake_case_ : int=False , snake_case_ : Any=None , snake_case_ : Tuple=False , ): if cache_dir is None: __magic_name__ = TRANSFORMERS_CACHE if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) __magic_name__ = None if not local_files_only: try: __magic_name__ = requests.head(snake_case_ , allow_redirects=snake_case_ , proxies=snake_case_ , timeout=snake_case_ ) if response.status_code == 200: __magic_name__ = response.headers.get('''ETag''' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass __magic_name__ = url_to_filename(snake_case_ , snake_case_ ) # get cache path to put the file __magic_name__ = os.path.join(snake_case_ , snake_case_ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(snake_case_ ): return cache_path else: __magic_name__ = [ file for file in fnmatch.filter(os.listdir(snake_case_ ) , filename + '''.*''' ) if not file.endswith('''.json''' ) and not file.endswith('''.lock''' ) ] if len(snake_case_ ) > 0: return os.path.join(snake_case_ , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( '''Cannot find the requested files in the cached path and outgoing traffic has been''' ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\'''' ''' to False.''' ) return None # From now on, etag is not None. if os.path.exists(snake_case_ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. __magic_name__ = cache_path + '''.lock''' with FileLock(snake_case_ ): # If the download just completed while the lock was activated. if os.path.exists(snake_case_ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: __magic_name__ = cache_path + '''.incomplete''' @contextmanager def _resumable_file_manager(): with open(snake_case_ , '''a+b''' ) as f: yield f __magic_name__ = _resumable_file_manager if os.path.exists(snake_case_ ): __magic_name__ = os.stat(snake_case_ ).st_size else: __magic_name__ = 0 else: __magic_name__ = partial(tempfile.NamedTemporaryFile , dir=snake_case_ , delete=snake_case_ ) __magic_name__ = 0 # Download to temporary file, then copy to cache dir once finished. 
# Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '''%s not found in cache or force_download set to True, downloading to %s''' , snake_case_ , temp_file.name , ) http_get( snake_case_ , snake_case_ , proxies=snake_case_ , resume_size=snake_case_ , user_agent=snake_case_ , ) os.replace(temp_file.name , snake_case_ ) __magic_name__ = {'''url''': url, '''etag''': etag} __magic_name__ = cache_path + '''.json''' with open(snake_case_ , '''w''' ) as meta_file: json.dump(snake_case_ , snake_case_ ) return cache_path def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[Any]=None ): __magic_name__ = url.encode('''utf-8''' ) __magic_name__ = shaaaa(snake_case_ ) __magic_name__ = url_hash.hexdigest() if etag: __magic_name__ = etag.encode('''utf-8''' ) __magic_name__ = shaaaa(snake_case_ ) filename += "." + etag_hash.hexdigest() if url.endswith('''.h5''' ): filename += ".h5" return filename def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str=None , snake_case_ : Tuple=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[Any]=False , snake_case_ : Union[str, Any]=None , snake_case_ : List[str]=False , snake_case_ : Optional[int]=False , snake_case_ : Optional[int]=False , ): if cache_dir is None: __magic_name__ = TRANSFORMERS_CACHE if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) if isinstance(snake_case_ , snake_case_ ): __magic_name__ = str(snake_case_ ) if is_remote_url(snake_case_ ): # URL, so get it from the cache (downloading if necessary) __magic_name__ = get_from_cache( snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , user_agent=snake_case_ , local_files_only=snake_case_ , ) elif os.path.exists(snake_case_ ): # File, and it exists. __magic_name__ = url_or_filename elif urlparse(snake_case_ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('''file {} not found'''.format(snake_case_ ) ) else: # Something unknown raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case_ ) ) if extract_compressed_file: if not is_zipfile(snake_case_ ) and not tarfile.is_tarfile(snake_case_ ): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" __magic_name__ , __magic_name__ = os.path.split(snake_case_ ) __magic_name__ = output_file.replace('''.''' , '''-''' ) + '''-extracted''' __magic_name__ = os.path.join(snake_case_ , snake_case_ ) if os.path.isdir(snake_case_ ) and os.listdir(snake_case_ ) and not force_extract: return output_path_extracted # Prevent parallel extractions __magic_name__ = output_path + '''.lock''' with FileLock(snake_case_ ): shutil.rmtree(snake_case_ , ignore_errors=snake_case_ ) os.makedirs(snake_case_ ) if is_zipfile(snake_case_ ): with ZipFile(snake_case_ , '''r''' ) as zip_file: zip_file.extractall(snake_case_ ) zip_file.close() elif tarfile.is_tarfile(snake_case_ ): __magic_name__ = tarfile.open(snake_case_ ) tar_file.extractall(snake_case_ ) tar_file.close() else: raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case_ ) ) return output_path_extracted return output_path def _SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int="," ): assert isinstance(snake_case_ , snake_case_ ) if os.path.isfile(snake_case_ ): with open(snake_case_ ) as f: __magic_name__ = eval(f.read() ) else: __magic_name__ = requests.get(snake_case_ ) try: __magic_name__ = requests.json() except Exception: __magic_name__ = req.content.decode() assert data is not None, "could not connect" try: __magic_name__ = eval(snake_case_ ) except Exception: __magic_name__ = data.split('''\n''' ) req.close() return data def _SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ): __magic_name__ = requests.get(snake_case_ ) __magic_name__ = np.array(Image.open(BytesIO(response.content ) ) ) return img def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ): __magic_name__ = url.split('''/''' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(snake_case_ ) with open(snake_case_ , '''rb''' ) as stream: __magic_name__ = pkl.load(snake_case_ ) __magic_name__ = weights.pop('''model''' ) __magic_name__ = {} for k, v in model.items(): __magic_name__ = torch.from_numpy(snake_case_ ) if "running_var" in k: __magic_name__ = torch.tensor([0] ) __magic_name__ = k.replace('''running_var''' , '''num_batches_tracked''' ) __magic_name__ = zero return new def _SCREAMING_SNAKE_CASE ( ): print(f'{os.path.abspath(os.path.join(snake_case_ , os.pardir ) )}/demo.ipynb' ) def _SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Tuple="RGB" ): assert isinstance(snake_case_ , snake_case_ ) if os.path.isfile(snake_case_ ): __magic_name__ = cva.imread(snake_case_ ) else: __magic_name__ = get_image_from_url(snake_case_ ) assert img is not None, f'could not connect to: {im}' __magic_name__ = cva.cvtColor(snake_case_ , cva.COLOR_BGR2RGB ) if input_format == "RGB": __magic_name__ = img[:, :, ::-1] return img def _SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Dict=1 ): return (images[i : i + batch] for i in range(0 , len(snake_case_ ) , snake_case_ ))
721
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(1, n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
678
0
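The `url_to_filename` helper in the caching code above derives its cache key from hashes rather than the raw URL. A self-contained sketch of that scheme (standard library only; the URL and ETag are placeholders):

```python
from hashlib import sha256
from typing import Optional


def cache_filename(url: str, etag: Optional[str] = None) -> str:
    # Filename is the sha256 of the URL; a changed remote ETag yields a new
    # cache entry because its hash is appended as a suffix.
    filename = sha256(url.encode("utf-8")).hexdigest()
    if etag:
        filename += "." + sha256(etag.encode("utf-8")).hexdigest()
    return filename


print(cache_filename("https://example.com/model.bin", etag='"abc123"'))
```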
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup
        )
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
679
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
679
1
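A quick sanity-check sketch for the two popcount variants above: Brian Kernighan's loop runs once per set bit, so it can be cross-checked against Python's own `bin(n).count("1")`. The test values mirror the benchmark's.

```python
def popcount_kernighan(number: int) -> int:
    # Each `number &= number - 1` clears exactly one set bit.
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


for n in (0, 1, 25, 37, 58, 2**20 - 1):
    assert popcount_kernighan(n) == bin(n).count("1")
print("all popcount checks passed")
```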
import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: __magic_name__ = None __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __magic_name__ = { '''vocab_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/nllb-200-distilled-600M''': ( '''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json''' ), }, } __magic_name__ = { '''facebook/nllb-large-en-ro''': 1_024, '''facebook/nllb-200-distilled-600M''': 1_024, } # fmt: off __magic_name__ = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', 
'''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn'''] class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = ["input_ids", "attention_mask"] __UpperCAmelCase = NllbTokenizer __UpperCAmelCase = [] __UpperCAmelCase = [] def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<mask>" , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , **_UpperCAmelCase , ): # Mask token behave like a normal word, i.e. include the space before it __snake_case : Dict = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token __snake_case : Optional[int] = legacy_behaviour super().__init__( vocab_file=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , legacy_behaviour=_UpperCAmelCase , **_UpperCAmelCase , ) __snake_case : Optional[Any] = vocab_file __snake_case : int = False if not self.vocab_file else True __snake_case : int = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} ) __snake_case : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(_UpperCAmelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __snake_case : List[Any] = src_lang if src_lang is not None else 'eng_Latn' __snake_case : Any = self.convert_tokens_to_ids(self._src_lang ) __snake_case : int = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowercase_ ( self ): return self._src_lang @src_lang.setter def lowercase_ ( self , _UpperCAmelCase ): __snake_case : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __snake_case : str = [self.sep_token_id] __snake_case : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) __snake_case : List[Any] = src_lang __snake_case : str = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ) __snake_case : List[Any] = self.convert_tokens_to_ids(_UpperCAmelCase ) __snake_case : int = tgt_lang_id return inputs def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = "eng_Latn" , _UpperCAmelCase = None , _UpperCAmelCase = "fra_Latn" , **_UpperCAmelCase , ): __snake_case : Optional[Any] = src_lang __snake_case : Tuple = tgt_lang return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) def lowercase_ ( self ): return self.set_src_lang_special_tokens(self.src_lang ) def lowercase_ ( self ): return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : Optional[int] = self.convert_tokens_to_ids(_UpperCAmelCase ) if self.legacy_behaviour: __snake_case : List[Any] = [] __snake_case : Any = [self.eos_token_id, self.cur_lang_code] else: __snake_case : Union[str, Any] = [self.cur_lang_code] __snake_case : Optional[int] = [self.eos_token_id] __snake_case : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : int = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : List[Any] = self.convert_tokens_to_ids(_UpperCAmelCase ) if self.legacy_behaviour: __snake_case : Optional[int] = [] __snake_case : List[Any] = [self.eos_token_id, self.cur_lang_code] else: __snake_case : str = [self.cur_lang_code] __snake_case : List[str] = [self.eos_token_id] __snake_case : List[Any] = 
self.convert_ids_to_tokens(self.prefix_tokens ) __snake_case : List[str] = self.convert_ids_to_tokens(self.suffix_tokens ) __snake_case : Any = processors.TemplateProcessing( single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory.""" ) return __snake_case : List[str] = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
679
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" def lowercase_ ( self ): __snake_case : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) ) class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ): __snake_case : List[str] = parent __snake_case : Tuple = batch_size __snake_case : str = image_size __snake_case : Union[str, Any] = patch_size __snake_case : Optional[int] = num_channels __snake_case : List[str] = last_hidden_size __snake_case : Optional[Any] = num_attention_heads __snake_case : Dict = hidden_act __snake_case : List[Any] = conv_kernel_size __snake_case : int = output_stride __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Dict = attention_probs_dropout_prob __snake_case : Any = classifier_dropout_prob __snake_case : str = use_labels __snake_case : Optional[Any] = is_training __snake_case : Dict = num_labels __snake_case : str = initializer_range __snake_case : Union[str, Any] = scope def lowercase_ ( self ): __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : str = None __snake_case : Dict = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ ( self ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() 
        __snake_case : List[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : Tuple = self.num_labels
        __snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : Optional[Any] = self.num_labels
        __snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Tuple = model(_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        __snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )

    def lowercase_ ( self ):
        __snake_case : Optional[int] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
        __snake_case : Optional[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict


@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    """simple docstring"""
    __UpperCAmelCase = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False

    def lowercase_ ( self ):
        __snake_case : Dict = MobileViTModelTester(self )
        __snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )

    def lowercase_ ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViT does not use inputs_embeds' )
    def lowercase_ ( self ):
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings' )
    def lowercase_ ( self ):
        pass

    @unittest.skip(reason='MobileViT does not output attentions' )
    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Tuple = model_class(_UpperCAmelCase )
            __snake_case : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : List[str] = [*signature.parameters.keys()]
            __snake_case : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def lowercase_ ( self ):
        def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
            __snake_case : str = model_class(_UpperCAmelCase )
            model.to(_UpperCAmelCase )
            model.eval()
            with torch.no_grad():
                __snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
            __snake_case : Optional[Any] = outputs.hidden_states
            __snake_case : str = 5
            self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            __snake_case : Optional[Any] = 2
            for i in range(len(_UpperCAmelCase ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __snake_case : Dict = True
            check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __snake_case : Tuple = True
            check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )

    @slow
    def lowercase_ ( self ):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )


def UpperCAmelCase__( ):
    __snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""
    @cached_property
    def lowercase_ ( self ):
        return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None

    @slow
    def lowercase_ ( self ):
        __snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase )
        __snake_case : Union[str, Any] = self.default_image_processor
        __snake_case : str = prepare_img()
        __snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : Tuple = model(**_UpperCAmelCase )
        # verify the logits
        __snake_case : Tuple = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
        __snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )

    @slow
    def lowercase_ ( self ):
        __snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        __snake_case : str = model.to(_UpperCAmelCase )
        __snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        __snake_case : Optional[int] = prepare_img()
        __snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : int = model(**_UpperCAmelCase )
        __snake_case : int = outputs.logits
        # verify the logits
        __snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , _UpperCAmelCase )
        __snake_case : Optional[int] = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=_UpperCAmelCase , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )

    @slow
    def lowercase_ ( self ):
        __snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        __snake_case : str = model.to(_UpperCAmelCase )
        __snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' )
        __snake_case : Any = prepare_img()
        __snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __snake_case : Optional[Any] = model(**_UpperCAmelCase )
        __snake_case : str = outputs.logits.detach().cpu()
        __snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] )
        __snake_case : List[Any] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
        __snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
        __snake_case : List[str] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
679
1
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY


# These 2 model types require different inputs than those of the usual text models.
__magic_name__ = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}


@is_pipeline_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""
    __UpperCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    __UpperCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        __UpperCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        __UpperCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = pipeline(task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
        __snake_case : List[str] = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
        __snake_case : Dict = text_classifier('This is great !' , top_k=2 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}] )
        __snake_case : Tuple = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ] , )
        __snake_case : List[str] = text_classifier('This is great !' , top_k=1 )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
        # Legacy behavior
        __snake_case : Any = text_classifier('This is great !' , return_all_scores=_UpperCAmelCase )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.504}] )
        __snake_case : str = text_classifier('This is great !' , return_all_scores=_UpperCAmelCase )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]] )
        __snake_case : List[Any] = text_classifier(['This is great !', 'Something else'] , return_all_scores=_UpperCAmelCase )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ] , )
        __snake_case : List[Any] = text_classifier(['This is great !', 'Something else'] , return_all_scores=_UpperCAmelCase )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [
                {'label': 'LABEL_0', 'score': 0.504},
                {'label': 'LABEL_0', 'score': 0.504},
            ] , )

    @require_torch
    def lowercase_ ( self ):
        import torch

        __snake_case : List[str] = pipeline(task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
        __snake_case : Optional[int] = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.504}] )

    @require_tf
    def lowercase_ ( self ):
        __snake_case : str = pipeline(task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
        __snake_case : List[Any] = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'LABEL_0', 'score': 0.504}] )

    @slow
    @require_torch
    def lowercase_ ( self ):
        __snake_case : Optional[Any] = pipeline('text-classification' )
        __snake_case : List[Any] = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 1.0}] )
        __snake_case : Optional[Any] = text_classifier('This is bad !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
        __snake_case : List[Any] = text_classifier('Birds are a type of animal' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 0.988}] )

    @slow
    @require_tf
    def lowercase_ ( self ):
        __snake_case : int = pipeline('text-classification' , framework='tf' )
        __snake_case : str = text_classifier('This is great !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 1.0}] )
        __snake_case : Union[str, Any] = text_classifier('This is bad !' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
        __snake_case : Union[str, Any] = text_classifier('Birds are a type of animal' )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': 'POSITIVE', 'score': 0.988}] )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : Tuple = TextClassificationPipeline(model=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : int = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __snake_case : Union[str, Any] = 'HuggingFace is in'
        __snake_case : Dict = text_classifier(_UpperCAmelCase )
        self.assertEqual(nested_simplify(_UpperCAmelCase ) , [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] )
        self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
        __snake_case : Tuple = ['HuggingFace is in ', 'Paris is in France']
        __snake_case : Tuple = text_classifier(_UpperCAmelCase )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}, {'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] , )
        self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __snake_case : Union[str, Any] = text_classifier(_UpperCAmelCase , top_k=_UpperCAmelCase )
        __snake_case : Dict = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [[{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] * N, [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] * N] , )
        __snake_case : Optional[int] = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        __snake_case : List[Any] = text_classifier(_UpperCAmelCase )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , {'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )} , )
        self.assertTrue(outputs['label'] in model.config.idalabel.values() )
        # This might be used as a text pair, but the tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __snake_case : int = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(_UpperCAmelCase ):
            text_classifier(_UpperCAmelCase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __snake_case : int = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
        self.assertEqual(
            nested_simplify(_UpperCAmelCase ) , [{'label': ANY(_UpperCAmelCase ), 'score': ANY(_UpperCAmelCase )}] , )
        self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
679
def UpperCAmelCase__( __UpperCAmelCase : int | float | str ):
    try:
        __snake_case : int = float(__UpperCAmelCase )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    __snake_case : Any = decimal - int(__UpperCAmelCase )
    if fractional_part == 0:
        return int(__UpperCAmelCase ), 1
    else:
        __snake_case : Tuple = len(str(__UpperCAmelCase ).split('.' )[1] )
        __snake_case : Tuple = int(decimal * (10**number_of_frac_digits) )
        __snake_case : List[Any] = 10**number_of_frac_digits
        __snake_case , __snake_case : List[Any] = denominator, numerator
        while True:
            __snake_case : Any = dividend % divisor
            if remainder == 0:
                break
            __snake_case , __snake_case : Optional[int] = divisor, remainder
        __snake_case , __snake_case : Union[str, Any] = numerator / divisor, denominator / divisor
        return int(__UpperCAmelCase ), int(__UpperCAmelCase )


if __name__ == "__main__":
    print(F'''{decimal_to_fraction(2) = }''')
    print(F'''{decimal_to_fraction(89.0) = }''')
    print(F'''{decimal_to_fraction("67") = }''')
    print(F'''{decimal_to_fraction("45.0") = }''')
    print(F'''{decimal_to_fraction(1.5) = }''')
    print(F'''{decimal_to_fraction("6.25") = }''')
    print(F'''{decimal_to_fraction("78td") = }''')
679
1
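# Editor's illustrative sketch (not a dataset row): the masked variable names in the row above make
# the algorithm hard to follow, so this is a plain-named equivalent of the same idea -- scale the
# fractional part to an integer, then reduce with Euclid's algorithm (math.gcd stands in for the
# explicit remainder loop). All names here are our own.
from math import gcd

def decimal_to_fraction_sketch(value: float) -> tuple[int, int]:
    decimal = float(value)
    if decimal == int(decimal):  # no fractional part
        return int(decimal), 1
    frac_digits = len(str(decimal).split('.')[1])
    numerator = int(decimal * 10**frac_digits)
    denominator = 10**frac_digits
    common = gcd(numerator, denominator)
    return numerator // common, denominator // common

assert decimal_to_fraction_sketch(6.25) == (25, 4)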
import unittest

import numpy as np

from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
        FlaxRobertaPreLayerNormForCausalLM,
        FlaxRobertaPreLayerNormForMaskedLM,
        FlaxRobertaPreLayerNormForMultipleChoice,
        FlaxRobertaPreLayerNormForQuestionAnswering,
        FlaxRobertaPreLayerNormForSequenceClassification,
        FlaxRobertaPreLayerNormForTokenClassification,
        FlaxRobertaPreLayerNormModel,
    )


class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=4 , ):
        __snake_case : int = parent
        __snake_case : Union[str, Any] = batch_size
        __snake_case : List[Any] = seq_length
        __snake_case : Optional[Any] = is_training
        __snake_case : Optional[Any] = use_attention_mask
        __snake_case : Tuple = use_token_type_ids
        __snake_case : Union[str, Any] = use_labels
        __snake_case : Optional[Any] = vocab_size
        __snake_case : int = hidden_size
        __snake_case : int = num_hidden_layers
        __snake_case : List[str] = num_attention_heads
        __snake_case : Dict = intermediate_size
        __snake_case : Optional[int] = hidden_act
        __snake_case : Optional[int] = hidden_dropout_prob
        __snake_case : Any = attention_probs_dropout_prob
        __snake_case : int = max_position_embeddings
        __snake_case : List[Any] = type_vocab_size
        __snake_case : List[Any] = type_sequence_label_size
        __snake_case : Dict = initializer_range
        __snake_case : Optional[int] = num_choices

    def lowercase_ ( self ):
        __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __snake_case : Optional[int] = None
        if self.use_attention_mask:
            __snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        __snake_case : List[Any] = None
        if self.use_token_type_ids:
            __snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __snake_case : Optional[int] = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def lowercase_ ( self ):
        __snake_case : List[str] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs
        __snake_case : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def lowercase_ ( self ):
        __snake_case : Optional[int] = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs
        __snake_case : Optional[int] = True
        __snake_case : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
    """simple docstring"""
    __UpperCAmelCase = True
    __UpperCAmelCase = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def lowercase_ ( self ):
        __snake_case : List[str] = FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def lowercase_ ( self ):
        for model_class_name in self.all_model_classes:
            __snake_case : List[Any] = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase )
            __snake_case : str = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_UpperCAmelCase )


@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""
    @slow
    def lowercase_ ( self ):
        __snake_case : str = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase )
        __snake_case : Dict = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __snake_case : Any = model(_UpperCAmelCase )[0]
        __snake_case : str = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , _UpperCAmelCase )
        # compare the actual values for a slice.
        __snake_case : str = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )

    @slow
    def lowercase_ ( self ):
        __snake_case : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase )
        __snake_case : Optional[Any] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
        __snake_case : Optional[Any] = model(_UpperCAmelCase )[0]
        # compare the actual values for a slice.
        __snake_case : List[Any] = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
679
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

__magic_name__ = logging.getLogger(__name__)


@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = field(metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    __UpperCAmelCase = field(default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )


@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def lowercase_ ( self ):
        if self.train_file is not None:
            __snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            __snake_case : List[str] = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."


@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = 42
    __UpperCAmelCase = True
    __UpperCAmelCase = None
    __UpperCAmelCase = None

    def __call__( self , _UpperCAmelCase ):
        __snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
        __snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
        __snake_case : List[Any] = len(_UpperCAmelCase )
        __snake_case : Union[str, Any] = len(features[0]['input_ids'] )
        __snake_case : Union[str, Any] = [
            [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
        ]
        __snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )
        __snake_case : Optional[Any] = self.tokenizer.pad(
            _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        __snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
        # Add back labels
        __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa )
        return batch


def UpperCAmelCase__( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    __snake_case : Tuple = training_args.get_process_log_level()
    logger.setLevel(__UpperCAmelCase )
    datasets.utils.logging.set_verbosity(__UpperCAmelCase )
    transformers.utils.logging.set_verbosity(__UpperCAmelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    __snake_case : Dict = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __snake_case : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        __snake_case : Optional[int] = {}
        if data_args.train_file is not None:
            __snake_case : Optional[int] = data_args.train_file
        if data_args.validation_file is not None:
            __snake_case : int = data_args.validation_file
        __snake_case : int = data_args.train_file.split('.' )[-1]
        __snake_case : Tuple = load_dataset(
            __UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        __snake_case : Optional[int] = load_dataset(
            'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __snake_case : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    __snake_case : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    __snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    __snake_case : str = [F"""ending{i}""" for i in range(4 )]
    __snake_case : Optional[Any] = 'sent1'
    __snake_case : Tuple = 'sent2'

    if data_args.max_seq_length is None:
        __snake_case : List[Any] = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            __snake_case : List[Any] = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        __snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )

    # Preprocessing the datasets.
    def preprocess_function(__UpperCAmelCase : Tuple ):
        __snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
        __snake_case : Union[str, Any] = examples[question_header_name]
        __snake_case : Optional[int] = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
        ]
        # Flatten out
        __snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
        __snake_case : int = list(chain(*__UpperCAmelCase ) )
        # Tokenize
        __snake_case : Tuple = tokenizer(
            __UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        __snake_case : Optional[Any] = raw_datasets['train']
        if data_args.max_train_samples is not None:
            __snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
            __snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            __snake_case : int = train_dataset.map(
                __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        __snake_case : Optional[Any] = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            __snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
            __snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            __snake_case : List[Any] = eval_dataset.map(
                __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    # Data collator
    __snake_case : str = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
    )

    # Metric
    def compute_metrics(__UpperCAmelCase : int ):
        __snake_case , __snake_case : Union[str, Any] = eval_predictions
        __snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}

    # Initialize our Trainer
    __snake_case : List[str] = Trainer(
        model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )

    # Training
    if training_args.do_train:
        __snake_case : Dict = None
        if training_args.resume_from_checkpoint is not None:
            __snake_case : Any = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __snake_case : List[str] = last_checkpoint
        __snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        __snake_case : List[Any] = train_result.metrics
        __snake_case : Optional[Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
        )
        __snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
        trainer.log_metrics('train' , __UpperCAmelCase )
        trainer.save_metrics('train' , __UpperCAmelCase )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        __snake_case : Dict = trainer.evaluate()
        __snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
        __snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
        trainer.log_metrics('eval' , __UpperCAmelCase )
        trainer.save_metrics('eval' , __UpperCAmelCase )

    __snake_case : List[Any] = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__UpperCAmelCase )
    else:
        trainer.create_model_card(**__UpperCAmelCase )


def UpperCAmelCase__( __UpperCAmelCase : Dict ):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
679
1
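# Editor's shape sketch (not a dataset row): the multiple-choice collator in the row above flattens
# (batch_size, num_choices) examples so tokenizer.pad can treat them as one batch, then views the
# padded tensors back to (batch_size, num_choices, seq_len). A minimal numeric illustration:
import torch

batch_size, num_choices, seq_len = 2, 4, 7
flat = torch.zeros(batch_size * num_choices, seq_len)  # what tokenizer.pad returns for the flat list
unflat = flat.view(batch_size, num_choices, -1)        # the collator's "Un-flatten" step
assert unflat.shape == (2, 4, 7)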
from typing import TYPE_CHECKING

from ...utils import _LazyModule


__magic_name__ = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}

if TYPE_CHECKING:
    from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
679
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__magic_name__ = logging.get_logger(__name__)

__magic_name__ = '''▁'''

__magic_name__ = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}

__magic_name__ = {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}

__magic_name__ = {
    '''facebook/s2t-small-librispeech-asr''': 1_024,
}

__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

__magic_name__ = {'''mustc''': MUSTC_LANGS}


class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = MAX_MODEL_INPUT_SIZES
    __UpperCAmelCase = ["input_ids", "attention_mask"]
    __UpperCAmelCase = []

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        __snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
        __snake_case : Dict = do_upper_case
        __snake_case : Optional[Any] = do_lower_case
        __snake_case : List[Any] = load_json(_UpperCAmelCase )
        __snake_case : Dict = {v: k for k, v in self.encoder.items()}
        __snake_case : Optional[Any] = spm_file
        __snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
        if lang_codes is not None:
            __snake_case : Optional[Any] = lang_codes
            __snake_case : int = LANGUAGES[lang_codes]
            __snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
            __snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
            __snake_case : Dict = self.lang_tokens
            __snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            __snake_case : Optional[int] = {}

    @property
    def lowercase_ ( self ):
        return len(self.encoder )

    @property
    def lowercase_ ( self ):
        return self._tgt_lang

    @tgt_lang.setter
    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : str = new_tgt_lang
        self.set_tgt_lang_special_tokens(_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Tuple = self.lang_code_to_id[tgt_lang]
        __snake_case : Optional[Any] = [lang_code_id]

    def lowercase_ ( self , _UpperCAmelCase ):
        return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )

    def lowercase_ ( self , _UpperCAmelCase ):
        return self.decoder.get(_UpperCAmelCase , self.unk_token )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : str = []
        __snake_case : Any = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                __snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                __snake_case : Any = []
            else:
                current_sub_tokens.append(_UpperCAmelCase )
        __snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
        __snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
        __snake_case : Optional[Any] = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
        return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        __snake_case : int = self.__dict__.copy()
        __snake_case : str = None
        return state

    def __setstate__( self , _UpperCAmelCase ):
        __snake_case : List[Any] = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __snake_case : Optional[int] = {}
        __snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
        __snake_case : str = Path(_UpperCAmelCase )
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        __snake_case : int = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        __snake_case : Union[str, Any] = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder , _UpperCAmelCase )
        if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , _UpperCAmelCase )
        elif not os.path.isfile(self.spm_file ):
            with open(_UpperCAmelCase , 'wb' ) as fi:
                __snake_case : List[str] = self.sp_model.serialized_model_proto()
                fi.write(_UpperCAmelCase )
        return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))


def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
    __snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
    spm.Load(str(__UpperCAmelCase ) )
    return spm


def UpperCAmelCase__( __UpperCAmelCase : str ):
    with open(__UpperCAmelCase , 'r' ) as f:
        return json.load(__UpperCAmelCase )


def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
    with open(__UpperCAmelCase , 'w' ) as f:
        json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
679
1
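# Editor's usage sketch (not a dataset row): the class above mirrors transformers'
# Speech2TextTokenizer, which pairs a vocab.json with a SentencePiece model and can upper-case
# decoded ASR text. Assuming the public API and the checkpoint referenced in the row:
from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained('facebook/s2t-small-librispeech-asr')
ids = tok('hello world').input_ids                 # SentencePiece pieces mapped through vocab.json
text = tok.decode(ids, skip_special_tokens=True)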
from typing import Dict, Iterable, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL  # soft dependency

if is_pytesseract_available():
    import pytesseract

__magic_name__ = logging.get_logger(__name__)


def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] ):
    return [
        int(10_00 * (box[0] / width) ),
        int(10_00 * (box[1] / height) ),
        int(10_00 * (box[2] / width) ),
        int(10_00 * (box[3] / height) ),
    ]


def UpperCAmelCase__( __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Optional[str] , __UpperCAmelCase : Optional[str] ):
    __snake_case : int = to_pil_image(__UpperCAmelCase )
    __snake_case , __snake_case : Any = pil_image.size
    __snake_case : Tuple = pytesseract.image_to_data(__UpperCAmelCase , lang=__UpperCAmelCase , output_type='dict' , config=__UpperCAmelCase )
    __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : int = data['text'], data['left'], data['top'], data['width'], data['height']
    # filter empty words and corresponding coordinates
    __snake_case : Optional[int] = [idx for idx, word in enumerate(__UpperCAmelCase ) if not word.strip()]
    __snake_case : Any = [word for idx, word in enumerate(__UpperCAmelCase ) if idx not in irrelevant_indices]
    __snake_case : Union[str, Any] = [coord for idx, coord in enumerate(__UpperCAmelCase ) if idx not in irrelevant_indices]
    __snake_case : Optional[Any] = [coord for idx, coord in enumerate(__UpperCAmelCase ) if idx not in irrelevant_indices]
    __snake_case : List[str] = [coord for idx, coord in enumerate(__UpperCAmelCase ) if idx not in irrelevant_indices]
    __snake_case : List[str] = [coord for idx, coord in enumerate(__UpperCAmelCase ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    __snake_case : Any = []
    for x, y, w, h in zip(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        __snake_case : Union[str, Any] = [x, y, x + w, y + h]
        actual_boxes.append(__UpperCAmelCase )
    # finally, normalize the bounding boxes
    __snake_case : Optional[Any] = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) )
    assert len(__UpperCAmelCase ) == len(__UpperCAmelCase ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes


class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    __UpperCAmelCase = ["pixel_values"]

    def __init__( self , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = True , _UpperCAmelCase = 1 / 255 , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = "" , **_UpperCAmelCase , ):
        super().__init__(**_UpperCAmelCase )
        __snake_case : List[Any] = size if size is not None else {'height': 224, 'width': 224}
        __snake_case : Any = get_size_dict(_UpperCAmelCase )
        __snake_case : int = do_resize
        __snake_case : Union[str, Any] = size
        __snake_case : Optional[Any] = resample
        __snake_case : Tuple = do_rescale
        __snake_case : List[str] = rescale_value
        __snake_case : str = do_normalize
        __snake_case : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        __snake_case : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
        __snake_case : Dict = apply_ocr
        __snake_case : List[Any] = ocr_lang
        __snake_case : str = tesseract_config

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = PILImageResampling.BILINEAR , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        __snake_case : str = get_size_dict(_UpperCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        __snake_case : Dict = (size['height'], size['width'])
        return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase=None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = ChannelDimension.FIRST , **_UpperCAmelCase , ):
        __snake_case : Tuple = do_resize if do_resize is not None else self.do_resize
        __snake_case : Union[str, Any] = size if size is not None else self.size
        __snake_case : Dict = get_size_dict(_UpperCAmelCase )
        __snake_case : Optional[int] = resample if resample is not None else self.resample
        __snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        __snake_case : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        __snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        __snake_case : Any = image_mean if image_mean is not None else self.image_mean
        __snake_case : Dict = image_std if image_std is not None else self.image_std
        __snake_case : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
        __snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang
        __snake_case : Tuple = tesseract_config if tesseract_config is not None else self.tesseract_config
        __snake_case : str = make_list_of_images(_UpperCAmelCase )
        if not valid_images(_UpperCAmelCase ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
        # All transformations expect numpy arrays.
        __snake_case : List[Any] = [to_numpy_array(_UpperCAmelCase ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , 'pytesseract' )
            __snake_case : Optional[int] = []
            __snake_case : str = []
            for image in images:
                __snake_case , __snake_case : Optional[Any] = apply_tesseract(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
                words_batch.append(_UpperCAmelCase )
                boxes_batch.append(_UpperCAmelCase )
        if do_resize:
            __snake_case : Optional[int] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
        if do_rescale:
            __snake_case : List[str] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
        if do_normalize:
            __snake_case : List[Any] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
        __snake_case : int = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
        __snake_case : str = BatchFeature(data={'pixel_values': images} , tensor_type=_UpperCAmelCase )
        if apply_ocr:
            __snake_case : Dict = words_batch
            __snake_case : Any = boxes_batch
        return data
679
def UpperCAmelCase__( __UpperCAmelCase : list ):
    __snake_case : List[Any] = len(__UpperCAmelCase )
    for _ in range(__UpperCAmelCase ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                __snake_case , __snake_case : int = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    __magic_name__ = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
679
1
from math import pi, sqrt


def UpperCAmelCase__( __UpperCAmelCase : float ):
    if num <= 0:
        raise ValueError('math domain error' )
    if num > 171.5:
        raise OverflowError('math range error' )
    elif num - int(__UpperCAmelCase ) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer' )
    elif num == 0.5:
        return sqrt(__UpperCAmelCase )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )


def UpperCAmelCase__( ):
    assert gamma(0.5 ) == sqrt(__UpperCAmelCase )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    __magic_name__ = 1.0
    while num:
        __magic_name__ = float(input('''Gamma of: '''))
        print(F'''gamma({num}) = {gamma(num)}''')
        print('''\nEnter 0 to exit...''')
679
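# Editor's sanity sketch (not a dataset row): for positive integers and half-integers the recursion
# in the row above reduces to gamma(n) = (n - 1) * gamma(n - 1) with base cases gamma(1) = 1 and
# gamma(0.5) = sqrt(pi), so it should agree with math.gamma:
import math

def gamma_half(num: float) -> float:
    if num == 0.5:
        return math.sqrt(math.pi)
    return 1.0 if num == 1 else (num - 1) * gamma_half(num - 1)

for x in (0.5, 1, 1.5, 2, 3.5, 10):
    assert math.isclose(gamma_half(x), math.gamma(x))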
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    __magic_name__ = '''pt'''
elif is_tf_available():
    __magic_name__ = '''tf'''
else:
    __magic_name__ = '''jax'''


class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
    """simple docstring"""
    __UpperCAmelCase = PerceiverTokenizer
    __UpperCAmelCase = False

    def lowercase_ ( self ):
        super().setUp()
        __snake_case : str = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase_ ( self ):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )

    def lowercase_ ( self , **_UpperCAmelCase ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        __snake_case : List[Any] = []
        for i in range(len(_UpperCAmelCase ) ):
            try:
                __snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )
        __snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
        __snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
        if max_length is not None and len(_UpperCAmelCase ) > max_length:
            __snake_case : List[str] = toks[:max_length]
        if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
            while len(_UpperCAmelCase ) < min_length:
                __snake_case : Optional[int] = toks + toks
        # toks_str = [t[1] for t in toks]
        __snake_case : List[Any] = [t[0] for t in toks]
        # Ensure consistency
        __snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
        if " " not in output_txt and len(_UpperCAmelCase ) > 1:
            __snake_case : List[str] = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
            )
        if with_prefix_space:
            __snake_case : List[Any] = ' ' + output_txt
        __snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
        return output_txt, output_ids

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.perceiver_tokenizer
        __snake_case : Dict = 'Unicode €.'
        __snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
        __snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
        # decoding
        __snake_case : int = tokenizer.decode(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )
        __snake_case : Optional[Any] = tokenizer('e è é ê ë' )
        __snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
        # decoding
        __snake_case : str = tokenizer.decode(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )

    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = self.perceiver_tokenizer
        __snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        __snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        __snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
        if FRAMEWORK != "jax":
            __snake_case : List[str] = list(batch.input_ids.numpy()[0] )
        else:
            __snake_case : List[Any] = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def lowercase_ ( self ):
        __snake_case : Dict = self.perceiver_tokenizer
        __snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        __snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , _UpperCAmelCase )
        self.assertIn('attention_mask' , _UpperCAmelCase )
        self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
        self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : List[str] = self.perceiver_tokenizer
        __snake_case : Tuple = [
            'Summary of the text.',
            'Another summary.',
        ]
        __snake_case : int = tokenizer(
            text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        self.assertEqual(32 , targets['input_ids'].shape[1] )

    def lowercase_ ( self ):
        # safety check on max_len default value so we are sure the test works
        __snake_case : Union[str, Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        __snake_case : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
                __snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                tokenizer.save_pretrained(_UpperCAmelCase )
                __snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
                __snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
                shutil.rmtree(_UpperCAmelCase )
        __snake_case : Dict = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                __snake_case : Optional[int] = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                __snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                tokenizer.save_pretrained(_UpperCAmelCase )
                __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
                __snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(_UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : Tuple = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_UpperCAmelCase )
                with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    __snake_case : Any = json.load(_UpperCAmelCase )
                with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    __snake_case : List[str] = json.load(_UpperCAmelCase )
                __snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]
                __snake_case : Dict = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                __snake_case : List[Any] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(_UpperCAmelCase , _UpperCAmelCase )
                with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(_UpperCAmelCase , _UpperCAmelCase )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __snake_case : Optional[Any] = tokenizer_class.from_pretrained(
                    _UpperCAmelCase , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
                __snake_case : str = tokenizer_class.from_pretrained(
                    _UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )

    def lowercase_ ( self ):
        __snake_case : Tuple = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '�' )

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        # The default common tokenizer tests use invalid tokens for Perceiver, which can only accept
        # one-character strings and special added tokens as tokens
        __snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                __snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                __snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
                self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
679
1
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""

    @slow
    def lowercase_ ( self ):
        __snake_case : Any = TFAutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' )
        __snake_case : Optional[int] = AutoTokenizer.from_pretrained('google/mt5-small' )
        __snake_case : Optional[int] = tokenizer('Hello there' , return_tensors='tf' ).input_ids
        __snake_case : Tuple = tokenizer('Hi I am' , return_tensors='tf' ).input_ids
        __snake_case : List[str] = model(_UpperCAmelCase , labels=_UpperCAmelCase ).loss
        __snake_case : int = -tf.math.reduce_mean(_UpperCAmelCase ).numpy()
        __snake_case : Any = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
679
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    __UpperCAmelCase = 42
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)

    def __call__( self ):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def lowercase_ ( self ):
        from .features import Value

        return {k: Value('string' ) for k in sorted(self.languages )}


@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    __UpperCAmelCase = None
    __UpperCAmelCase = None
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)

    def lowercase_ ( self ):
        __snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
        __snake_case : Optional[Any] = len(self.languages ) if self.languages else None

    def __call__( self ):
        return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Optional[int] = set(self.languages )
        if self.languages and set(_UpperCAmelCase ) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        __snake_case : Any = []
        for lang, text in translation_dict.items():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )

        # Ensure translations are in ascending order by language code.
        __snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )

        return {"language": languages, "translation": translations}

    def lowercase_ ( self ):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('string' ) ),
            "translation": Sequence(Value('string' ) ),
        }
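The variable-language feature flattens and sorts multi-translation examples; a minimal usage sketch against the public datasets API follows. The dump obfuscates the class and method names, so `TranslationVariableLanguages` is taken from the `field` default above and `encode_example` from the upstream library.

from datasets.features import TranslationVariableLanguages

feature = TranslationVariableLanguages(languages=['de', 'en', 'fr'])
encoded = feature.encode_example(
    {'en': 'the cat', 'fr': ['le chat', 'la chatte'], 'de': 'die katze'}
)
# The 'fr' list is split into two (lang, text) tuples, then everything is
# sorted by language code (ties broken by text), giving tuples from zip():
# {'language': ('de', 'en', 'fr', 'fr'),
#  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}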
679
1
import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="shi-labs/oneformer_demo" ): with open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='dataset' ) , 'r' ) as f: __snake_case : Optional[int] = json.load(__UpperCAmelCase ) __snake_case : Tuple = {} __snake_case : List[str] = [] __snake_case : Optional[Any] = [] for key, info in class_info.items(): __snake_case : Any = info['name'] class_names.append(info['name'] ) if info["isthing"]: thing_ids.append(int(__UpperCAmelCase ) ) __snake_case : Union[str, Any] = thing_ids __snake_case : Union[str, Any] = class_names return metadata class __SCREAMING_SNAKE_CASE ( unittest.TestCase): """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=3 , _UpperCAmelCase=30 , _UpperCAmelCase=400 , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=[0.5, 0.5, 0.5] , _UpperCAmelCase=10 , _UpperCAmelCase=False , _UpperCAmelCase=255 , _UpperCAmelCase="shi-labs/oneformer_demo" , _UpperCAmelCase="ade20k_panoptic.json" , _UpperCAmelCase=10 , ): __snake_case : Dict = parent __snake_case : List[str] = batch_size __snake_case : Any = num_channels __snake_case : Any = min_resolution __snake_case : str = max_resolution __snake_case : List[Any] = do_resize __snake_case : Tuple = {'shortest_edge': 32, 'longest_edge': 1_333} if size is None else size __snake_case : Dict = do_normalize __snake_case : Any = image_mean __snake_case : Any = image_std __snake_case : Dict = class_info_file __snake_case : Any = prepare_metadata(_UpperCAmelCase , _UpperCAmelCase ) __snake_case : str = num_text __snake_case : Optional[Any] = repo_path # for the post_process_functions __snake_case : Optional[Any] = 2 __snake_case : List[str] = 10 __snake_case : int = 10 __snake_case : List[str] = 3 __snake_case : int = 4 __snake_case : str = num_labels __snake_case : Optional[Any] = do_reduce_labels __snake_case : Optional[int] = ignore_index def lowercase_ ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False ): if not batched: __snake_case : Any = image_inputs[0] if isinstance(_UpperCAmelCase , Image.Image ): __snake_case , __snake_case : Optional[int] = image.size else: __snake_case , __snake_case : List[Any] = image.shape[1], image.shape[2] if w < h: __snake_case : List[Any] = int(self.size['shortest_edge'] * h / w ) __snake_case : Optional[int] = self.size['shortest_edge'] elif w > h: __snake_case : 
Union[str, Any] = self.size['shortest_edge'] __snake_case : Tuple = int(self.size['shortest_edge'] * w / h ) else: __snake_case : List[Any] = self.size['shortest_edge'] __snake_case : int = self.size['shortest_edge'] else: __snake_case : Optional[int] = [] for image in image_inputs: __snake_case , __snake_case : List[str] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __snake_case : Optional[Any] = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[0] )[0] __snake_case : Tuple = max(_UpperCAmelCase , key=lambda _UpperCAmelCase : item[1] )[1] return expected_height, expected_width def lowercase_ ( self ): return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __UpperCAmelCase = image_processing_class def lowercase_ ( self ): __snake_case : Optional[Any] = OneFormerImageProcessorTester(self ) @property def lowercase_ ( self ): return self.image_processing_tester.prepare_image_processor_dict() def lowercase_ ( self ): __snake_case : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'image_std' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'ignore_index' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'class_info_file' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'num_text' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'repo_path' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'metadata' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_reduce_labels' ) ) def lowercase_ ( self ): pass def lowercase_ ( self ): # Initialize image_processor __snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __snake_case : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input __snake_case : Optional[int] = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values __snake_case , __snake_case : Dict = self.image_processing_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case , __snake_case : str = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __snake_case : Dict = image_processor( _UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self ): # Initialize image_processor __snake_case : Tuple = 
self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __snake_case : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input __snake_case : int = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values __snake_case , __snake_case : Dict = self.image_processing_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case , __snake_case : int = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __snake_case : Optional[Any] = image_processor( _UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self ): # Initialize image_processor __snake_case : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __snake_case : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input __snake_case : Tuple = image_processor(image_inputs[0] , ['semantic'] , return_tensors='pt' ).pixel_values __snake_case , __snake_case : Optional[int] = self.image_processing_tester.get_expected_values(_UpperCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __snake_case , __snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase ) __snake_case : List[str] = image_processor( _UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase="np" ): __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict ) # prepare image and target __snake_case : Union[str, Any] = self.image_processing_tester.num_labels __snake_case : str = None __snake_case : Tuple = None __snake_case : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=_UpperCAmelCase ) if with_segmentation_maps: __snake_case : List[str] = num_labels if is_instance_map: __snake_case : str = list(range(_UpperCAmelCase ) ) * 2 __snake_case : Union[str, Any] = dict(enumerate(_UpperCAmelCase ) ) __snake_case : str = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": __snake_case : Optional[Any] = [Image.fromarray(_UpperCAmelCase ) for annotation in annotations] __snake_case : Dict = image_processor( _UpperCAmelCase , ['semantic'] * len(_UpperCAmelCase ) , _UpperCAmelCase , return_tensors='pt' , instance_id_to_semantic_id=_UpperCAmelCase , pad_and_return_pixel_mask=_UpperCAmelCase , ) return inputs def lowercase_ ( self ): pass def lowercase_ ( self ): def 
common(_UpperCAmelCase=False , _UpperCAmelCase=None ): __snake_case : str = self.comm_get_image_processor_inputs( with_segmentation_maps=_UpperCAmelCase , is_instance_map=_UpperCAmelCase , segmentation_type=_UpperCAmelCase ) __snake_case : Union[str, Any] = inputs['mask_labels'] __snake_case : Any = inputs['class_labels'] __snake_case : Optional[int] = inputs['pixel_values'] __snake_case : str = inputs['text_inputs'] # check the batch_size for mask_label, class_label, text_input in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(_UpperCAmelCase ) , self.image_processing_tester.num_text ) common() common(is_instance_map=_UpperCAmelCase ) common(is_instance_map=_UpperCAmelCase , segmentation_type='pil' ) common(is_instance_map=_UpperCAmelCase , segmentation_type='pil' ) def lowercase_ ( self ): __snake_case : List[Any] = np.zeros((20, 50) ) __snake_case : Any = 1 __snake_case : Optional[int] = 1 __snake_case : Dict = 1 __snake_case : Union[str, Any] = binary_mask_to_rle(_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def lowercase_ ( self ): __snake_case : Union[str, Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) __snake_case : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs() __snake_case : str = fature_extractor.post_process_semantic_segmentation(_UpperCAmelCase ) self.assertEqual(len(_UpperCAmelCase ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) __snake_case : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )] __snake_case : Any = fature_extractor.post_process_semantic_segmentation(_UpperCAmelCase , target_sizes=_UpperCAmelCase ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def lowercase_ ( self ): __snake_case : Optional[int] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) __snake_case : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs() __snake_case : List[str] = image_processor.post_process_instance_segmentation(_UpperCAmelCase , threshold=0 ) self.assertTrue(len(_UpperCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('segmentation' in el ) self.assertTrue('segments_info' in el ) self.assertEqual(type(el['segments_info'] ) , _UpperCAmelCase ) self.assertEqual( el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def lowercase_ ( self ): __snake_case : Optional[int] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file='ade20k_panoptic.json' , num_text=self.image_processing_tester.num_text , repo_path='shi-labs/oneformer_demo' , ) __snake_case : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs() 
__snake_case : List[str] = image_processor.post_process_panoptic_segmentation(_UpperCAmelCase , threshold=0 ) self.assertTrue(len(_UpperCAmelCase ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue('segmentation' in el ) self.assertTrue('segments_info' in el ) self.assertEqual(type(el['segments_info'] ) , _UpperCAmelCase ) self.assertEqual( el['segmentation'].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
679
from __future__ import annotations


__magic_name__ = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def UpperCAmelCase__(
    __UpperCAmelCase : list[list[int]] ,
    __UpperCAmelCase : list[int] ,
    __UpperCAmelCase : list[int] ,
    __UpperCAmelCase : int ,
    __UpperCAmelCase : list[list[int]] ,
):
    __snake_case : Optional[int] = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
    ]  # the reference grid
    __snake_case : List[str] = 1
    __snake_case : str = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
    ]  # the action grid

    __snake_case : Dict = init[0]
    __snake_case : List[str] = init[1]
    __snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y]  # cost from starting cell to destination cell
    __snake_case : Any = [[f, g, x, y]]

    __snake_case : List[str] = False  # flag that is set when search is complete
    __snake_case : str = False  # flag set if we can't expand

    while not found and not resign:
        if len(__UpperCAmelCase ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costly action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            __snake_case : List[Any] = cell.pop()
            __snake_case : Optional[int] = next_cell[2]
            __snake_case : int = next_cell[3]
            __snake_case : Optional[Any] = next_cell[1]

            if x == goal[0] and y == goal[1]:
                __snake_case : Union[str, Any] = True
            else:
                for i in range(len(__UpperCAmelCase ) ):  # to try out different valid actions
                    __snake_case : Tuple = x + DIRECTIONS[i][0]
                    __snake_case : Tuple = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            __snake_case : List[str] = g + cost
                            __snake_case : Optional[Any] = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            __snake_case : Dict = 1
                            __snake_case : Any = i

    __snake_case : Tuple = []
    __snake_case : Dict = goal[0]
    __snake_case : Optional[int] = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        __snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
        __snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
        __snake_case : Tuple = xa
        __snake_case : List[str] = ya
        invpath.append([x, y] )

    __snake_case : Dict = []
    for i in range(len(__UpperCAmelCase ) ):
        path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
    return path, action


if __name__ == "__main__":
    __magic_name__ = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    __magic_name__ = [0, 0]  # all coordinates are given in format [y,x]
    __magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
    __magic_name__ = 1

    # the cost map which pushes the path closer to the goal
    __magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            __magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                __magic_name__ = 99

    __magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
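The search above re-sorts the whole open list on every expansion (sort, reverse, pop the minimum f). A common refinement keeps the frontier in a binary heap instead, so each pop costs O(log n). A minimal sketch of that variant follows; the function name `search_heapq` and the while/else early-exit structure are mine, not from the dump, and the path reconstruction is left identical to the original `search`.

import heapq


def search_heapq(grid, init, goal, cost, heuristic):
    # Same inputs and action-grid bookkeeping as `search` above, but the open
    # list is a heap of (f, g, x, y) tuples rather than a list re-sorted per step.
    closed = [[0] * len(grid[0]) for _ in grid]
    action = [[0] * len(grid[0]) for _ in grid]
    x, y = init
    closed[x][y] = 1
    open_heap = [(heuristic[x][y], 0, x, y)]
    while open_heap:
        _, g, x, y = heapq.heappop(open_heap)  # cheapest frontier cell
        if x == goal[0] and y == goal[1]:
            break
        for i, (dx, dy) in enumerate(DIRECTIONS):
            xa, ya = x + dx, y + dy
            if 0 <= xa < len(grid) and 0 <= ya < len(grid[0]):
                if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                    ga = g + cost
                    heapq.heappush(open_heap, (ga + heuristic[xa][ya], ga, xa, ya))
                    closed[xa][ya] = 1
                    action[xa][ya] = i
    else:  # heap exhausted without reaching the goal
        raise ValueError('Algorithm is unable to find solution')
    # Path reconstruction from `action` proceeds exactly as in `search`.
    return action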
679
1
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): __magic_name__ = '''pt''' elif is_tf_available(): __magic_name__ = '''tf''' else: __magic_name__ = '''jax''' class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = PerceiverTokenizer __UpperCAmelCase = False def lowercase_ ( self ): super().setUp() __snake_case : str = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def lowercase_ ( self ): return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' ) def lowercase_ ( self , **_UpperCAmelCase ): return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ): # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. __snake_case : List[Any] = [] for i in range(len(_UpperCAmelCase ) ): try: __snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase ) except UnicodeDecodeError: pass toks.append((i, tok) ) __snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) ) __snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) ) if max_length is not None and len(_UpperCAmelCase ) > max_length: __snake_case : List[str] = toks[:max_length] if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0: while len(_UpperCAmelCase ) < min_length: __snake_case : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __snake_case : List[Any] = [t[0] for t in toks] # Ensure consistency __snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) if " " not in output_txt and len(_UpperCAmelCase ) > 1: __snake_case : List[str] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase ) + ' ' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase ) ) if with_prefix_space: __snake_case : List[Any] = ' ' + output_txt __snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) return output_txt, output_ids def lowercase_ ( self ): __snake_case : List[Any] = self.perceiver_tokenizer __snake_case : Dict = 'Unicode €.' 
__snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase ) __snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['input_ids'] , _UpperCAmelCase ) # decoding __snake_case : int = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' ) __snake_case : Optional[Any] = tokenizer('e è é ê ë' ) __snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['input_ids'] , _UpperCAmelCase ) # decoding __snake_case : str = tokenizer.decode(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' ) def lowercase_ ( self ): __snake_case : Union[str, Any] = self.perceiver_tokenizer __snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] # fmt: off __snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on __snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) if FRAMEWORK != "jax": __snake_case : List[str] = list(batch.input_ids.numpy()[0] ) else: __snake_case : List[Any] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def lowercase_ ( self ): __snake_case : Dict = self.perceiver_tokenizer __snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) # check if input_ids are returned and no decoder_input_ids self.assertIn('input_ids' , _UpperCAmelCase ) self.assertIn('attention_mask' , _UpperCAmelCase ) self.assertNotIn('decoder_input_ids' , _UpperCAmelCase ) self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase ) def lowercase_ ( self ): __snake_case : List[str] = self.perceiver_tokenizer __snake_case : Tuple = [ 'Summary of the text.', 'Another summary.', ] __snake_case : int = tokenizer( text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def lowercase_ ( self ): # safety check on max_len default value so we are sure the test works __snake_case : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __snake_case : Optional[int] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Tuple = tempfile.mkdtemp() __snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running' __snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) __snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) __snake_case : List[str] = 
after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) shutil.rmtree(_UpperCAmelCase ) __snake_case : Dict = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc __snake_case : Tuple = tempfile.mkdtemp() __snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running' tokenizer.add_tokens(['bim', 'bambam'] ) __snake_case : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append('new_additional_special_token' ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) __snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) tokenizer.save_pretrained(_UpperCAmelCase ) __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase ) __snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: __snake_case : Any = json.load(_UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: __snake_case : List[str] = json.load(_UpperCAmelCase ) __snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )] __snake_case : Dict = added_tokens_extra_ids + [ 'an_additional_special_token' ] __snake_case : List[Any] = added_tokens_extra_ids + [ 'an_additional_special_token' ] with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_UpperCAmelCase , _UpperCAmelCase ) with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(_UpperCAmelCase , _UpperCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __snake_case : Optional[Any] = tokenizer_class.from_pretrained( _UpperCAmelCase , ) self.assertIn( 'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )] __snake_case : str = tokenizer_class.from_pretrained( _UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , ) self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens ) self.assertEqual( ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , ) def lowercase_ ( self ): __snake_case : Tuple = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '�' ) def lowercase_ ( self ): pass def lowercase_ ( self ): pass def lowercase_ ( self ): pass def lowercase_ ( self ): pass def lowercase_ ( self ): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens __snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): __snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]'] __snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
679
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = "instructblip_vision_model" def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) __snake_case : Optional[Any] = hidden_size __snake_case : Any = intermediate_size __snake_case : str = num_hidden_layers __snake_case : Any = num_attention_heads __snake_case : int = patch_size __snake_case : Dict = image_size __snake_case : Any = initializer_range __snake_case : List[Any] = attention_dropout __snake_case : Optional[Any] = layer_norm_eps __snake_case : Optional[int] = hidden_act __snake_case : int = qkv_bias @classmethod def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ): cls._set_token_in_kwargs(_UpperCAmelCase ) __snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __snake_case : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = "instructblip_qformer" def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) __snake_case : Union[str, Any] = vocab_size __snake_case : List[Any] = hidden_size __snake_case : str = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Optional[Any] = hidden_act __snake_case : int = intermediate_size __snake_case : str = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Dict = initializer_range __snake_case : Any = layer_norm_eps __snake_case : Union[str, Any] = position_embedding_type __snake_case : Optional[int] = cross_attention_frequency __snake_case : Union[str, Any] = encoder_hidden_size @classmethod def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ): cls._set_token_in_kwargs(_UpperCAmelCase ) __snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __snake_case : List[Any] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = "instructblip" __UpperCAmelCase = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ): super().__init__(**_UpperCAmelCase ) if vision_config is None: __snake_case : List[str] = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: __snake_case : Union[str, Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __snake_case : str = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) __snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase ) __snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase ) __snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' __snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase ) __snake_case : List[Any] = self.text_config.tie_word_embeddings __snake_case : Optional[int] = self.text_config.is_encoder_decoder __snake_case : List[str] = num_query_tokens __snake_case : Tuple = self.vision_config.hidden_size __snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __snake_case : str = 1.0 __snake_case : Optional[int] = 0.02 @classmethod def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , ) def lowercase_ ( self ): __snake_case : Tuple = copy.deepcopy(self.__dict__ ) __snake_case : Tuple = self.vision_config.to_dict() __snake_case : List[Any] = self.qformer_config.to_dict() __snake_case : Optional[int] = self.text_config.to_dict() __snake_case : List[str] = self.__class__.model_type return output
679
1
import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __magic_name__ = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __magic_name__ = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' __magic_name__ = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __SCREAMING_SNAKE_CASE ( datasets.Metric): """simple docstring""" def lowercase_ ( self ): if version.parse(scb.__version__ ) < version.parse('1.4.12' ): raise ImportWarning( 'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n' 'You can install it with `pip install "sacrebleu>=1.4.12"`.' 
) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[ 'https://github.com/m-popovic/chrF', ] , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = CHRF.CHAR_ORDER , _UpperCAmelCase = CHRF.WORD_ORDER , _UpperCAmelCase = CHRF.BETA , _UpperCAmelCase = False , _UpperCAmelCase = False , _UpperCAmelCase = False , ): __snake_case : Tuple = len(references[0] ) if any(len(_UpperCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) __snake_case : Optional[Any] = [[refs[i] for refs in references] for i in range(_UpperCAmelCase )] __snake_case : int = CHRF(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) __snake_case : List[str] = sb_chrf.corpus_score(_UpperCAmelCase , _UpperCAmelCase ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
679
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


__magic_name__ = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.' , _UpperCAmelCase , )
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
679
1
def UpperCAmelCase__( __UpperCAmelCase : float , __UpperCAmelCase : float ):
    # Kinetic energy of a body: KE = 0.5 * mass * velocity ** 2 (the abs()
    # calls make the result independent of the velocity's sign).
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative' )
    return 0.5 * mass * abs(__UpperCAmelCase ) * abs(__UpperCAmelCase )


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
679
import math
import os
import sys


def UpperCAmelCase__( __UpperCAmelCase : str ):
    __snake_case : Union[str, Any] = ''
    try:
        with open(__UpperCAmelCase , 'rb' ) as binary_file:
            __snake_case : Optional[Any] = binary_file.read()
            for dat in data:
                __snake_case : Tuple = F"""{dat:08b}"""
                result += curr_byte
            return result
    except OSError:
        print('File not accessible' )
        sys.exit()


def UpperCAmelCase__( __UpperCAmelCase : dict[str, str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : str ):
    lexicon.pop(__UpperCAmelCase )
    __snake_case : Union[str, Any] = last_match_id

    if math.loga(__UpperCAmelCase ).is_integer():
        for curr_key in lexicon:
            __snake_case : Tuple = '0' + lexicon[curr_key]

    __snake_case : Any = bin(__UpperCAmelCase )[2:]


def UpperCAmelCase__( __UpperCAmelCase : str ):
    __snake_case : Tuple = {'0': '0', '1': '1'}
    __snake_case , __snake_case : Optional[int] = '', ''
    __snake_case : str = len(__UpperCAmelCase )

    for i in range(len(__UpperCAmelCase ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        __snake_case : Optional[int] = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        index += 1
        __snake_case : Union[str, Any] = ''

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        __snake_case : Any = lexicon[curr_string]
        result += last_match_id

    return result


def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
    __snake_case : str = os.path.getsize(__UpperCAmelCase )
    __snake_case : List[Any] = bin(__UpperCAmelCase )[2:]
    __snake_case : Any = len(__UpperCAmelCase )

    return "0" * (length_length - 1) + file_length_binary + compressed


def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
    __snake_case : Tuple = 8
    try:
        with open(__UpperCAmelCase , 'wb' ) as opened_file:
            __snake_case : int = [
                to_write[i : i + byte_length] for i in range(0 , len(__UpperCAmelCase ) , __UpperCAmelCase )
            ]

            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(__UpperCAmelCase , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()


def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : str ):
    __snake_case : str = read_file_binary(__UpperCAmelCase )
    __snake_case : Tuple = compress_data(__UpperCAmelCase )
    __snake_case : int = add_file_length(__UpperCAmelCase , __UpperCAmelCase )
    write_file_binary(__UpperCAmelCase , __UpperCAmelCase )


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
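One detail worth spelling out is the length header prepended by `add_file_length` (that name appears in the dump's `compress` body); the arithmetic below is my worked example, not taken from the source.

# Hypothetical worked example of the length header built by add_file_length:
# a 10-byte source file gives bin(10)[2:] == "1010" (4 bits), so the stream is
#     "000" + "1010" + compressed
# i.e. (length-of-length - 1) zero bits followed by the length itself in
# binary. A decoder can count the leading zeros (n) and then read n + 1 bits
# to recover the original file length before the payload starts.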
679
1
from typing import Dict, Optional

import numpy as np

import datasets


__magic_name__ = '''
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
'''

__magic_name__ = '''
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric("mean_iou")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results) # doctest: +NORMALIZE_WHITESPACE
    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
'''

__magic_name__ = '''\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
    author = {{MMSegmentation Contributors}},
    license = {Apache-2.0},
    month = {7},
    title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
    url = {https://github.com/open-mmlab/mmsegmentation},
    year = {2020}
}'''


def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[Dict[int, int]] = None , __UpperCAmelCase : bool = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            __snake_case : Tuple = new_id

    # turn into Numpy arrays
    __snake_case : Optional[int] = np.array(__UpperCAmelCase )
    __snake_case : Optional[int] = np.array(__UpperCAmelCase )

    if reduce_labels:
        __snake_case : Tuple = 2_55
        __snake_case : List[str] = label - 1
        __snake_case : List[str] = 2_55

    __snake_case : Optional[Any] = label != ignore_index
    __snake_case : str = np.not_equal(__UpperCAmelCase , __UpperCAmelCase )
    __snake_case : Optional[Any] = pred_label[mask]
    __snake_case : Union[str, Any] = np.array(__UpperCAmelCase )[mask]

    __snake_case : Any = pred_label[pred_label == label]

    __snake_case : int = np.histogram(__UpperCAmelCase , bins=__UpperCAmelCase , range=(0, num_labels - 1) )[0]
    __snake_case : List[str] = np.histogram(__UpperCAmelCase , bins=__UpperCAmelCase , range=(0, num_labels - 1) )[0]
    __snake_case : Union[str, Any] = np.histogram(__UpperCAmelCase , bins=__UpperCAmelCase , range=(0, num_labels - 1) )[0]

    __snake_case : Any = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[Dict[int, int]] = None , __UpperCAmelCase : bool = False , ):
    __snake_case : Tuple = np.zeros((num_labels,) , dtype=np.floataa )
    __snake_case : Any = np.zeros((num_labels,) , dtype=np.floataa )
    __snake_case : str = np.zeros((num_labels,) , dtype=np.floataa )
    __snake_case : Any = np.zeros((num_labels,) , dtype=np.floataa )
    for result, gt_seg_map in zip(__UpperCAmelCase , __UpperCAmelCase ):
        __snake_case , __snake_case , __snake_case , __snake_case : Dict = intersect_and_union(
            __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def UpperCAmelCase__( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : bool , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[Dict[int, int]] = None , __UpperCAmelCase : bool = False , ):
    __snake_case , __snake_case , __snake_case , __snake_case : int = total_intersect_and_union(
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    # compute metrics
    __snake_case : Optional[int] = {}

    __snake_case : str = total_area_intersect.sum() / total_area_label.sum()
    __snake_case : List[Any] = total_area_intersect / total_area_union
    __snake_case : List[Any] = total_area_intersect / total_area_label

    __snake_case : Union[str, Any] = np.nanmean(__UpperCAmelCase )
    __snake_case : Optional[Any] = np.nanmean(__UpperCAmelCase )

    __snake_case : Dict = all_acc
    __snake_case : List[Any] = iou
    __snake_case : int = acc

    if nan_to_num is not None:
        __snake_case : Tuple = {metric: np.nan_to_num(__UpperCAmelCase , nan=__UpperCAmelCase ) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __SCREAMING_SNAKE_CASE ( datasets.Metric):
    """simple docstring"""

    def lowercase_ ( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
                    'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
                } ) ,
            reference_urls=[
                'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
            ] ,
        )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
        __snake_case : Union[str, Any] = mean_iou(
            results=_UpperCAmelCase , gt_seg_maps=_UpperCAmelCase , num_labels=_UpperCAmelCase , ignore_index=_UpperCAmelCase , nan_to_num=_UpperCAmelCase , label_map=_UpperCAmelCase , reduce_labels=_UpperCAmelCase , )
        return iou_result
679
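A minimal usage sketch for the mean-IoU metric above, standalone and illustrative only; it assumes the script is loadable via datasets.load_metric("mean_iou"), the entry point its own docstring documents:

import numpy as np
import datasets

mean_iou = datasets.load_metric("mean_iou")
# one 2x2 prediction / ground-truth pair with three classes
predicted = [np.array([[0, 1], [1, 2]])]
ground_truth = [np.array([[0, 1], [2, 2]])]
results = mean_iou.compute(
    predictions=predicted,
    references=ground_truth,
    num_labels=3,
    ignore_index=255,
    reduce_labels=False,
)
print(results["mean_iou"], results["per_category_iou"])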
from itertools import permutations


def UpperCAmelCase__( __UpperCAmelCase : tuple ):
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    __snake_case : Any = [7, 11, 13, 17]
    for i, test in enumerate(__UpperCAmelCase ):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def UpperCAmelCase__( __UpperCAmelCase : int = 10 ):
    return sum(
        int(''.join(map(__UpperCAmelCase , __UpperCAmelCase ) ) )
        for num in permutations(range(__UpperCAmelCase ) )
        if is_substring_divisible(__UpperCAmelCase )
    )


if __name__ == "__main__":
    print(F'''{solution() = }''')
679
1
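The substring-divisibility test above is easiest to sanity-check against the worked example from Project Euler problem 43, the pandigital number 1406357289; a standalone check that mirrors the snippet's logic without depending on its mangled names:

digits = tuple(int(c) for c in "1406357289")
# d2d3d4 % 2, d3d4d5 % 3, ..., d8d9d10 % 17 must all be zero
primes = (2, 3, 5, 7, 11, 13, 17)
assert all(
    (digits[i + 1] * 100 + digits[i + 2] * 10 + digits[i + 3]) % p == 0
    for i, p in enumerate(primes)
)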
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


__magic_name__ = {
    '''configuration_trajectory_transformer''': [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''TrajectoryTransformerConfig''',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __magic_name__ = [
        '''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrajectoryTransformerModel''',
        '''TrajectoryTransformerPreTrainedModel''',
        '''load_tf_weights_in_trajectory_transformer''',
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
679
# Function to print upper half of diamond (pyramid)
def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
    for i in range(0 , __UpperCAmelCase ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(' ' , end='' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('* ' , end='' )
        print()


def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
    for i in range(__UpperCAmelCase , 0 , -1 ):
        for _ in range(i , 0 , -1 ):  # printing stars (i per row, shrinking)
            print('* ' , end='' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(' ' , end='' )


def UpperCAmelCase__( __UpperCAmelCase : List[Any] ):
    if n <= 0:
        print(' ... ....   nothing printing :(' )
        return
    floyd(__UpperCAmelCase )  # upper half
    reverse_floyd(__UpperCAmelCase )  # lower half


if __name__ == "__main__":
    print(r'''| /\ | |- |  |-  |--| |\  /| |-''')
    print(r'''|/ \| |- |_ |_  |__| | \/ | |_''')
    __magic_name__ = 1
    while K:
        __magic_name__ = int(input('''enter the number and , and see the magic : ''') )
        print()
        pretty_print(user_number)
        __magic_name__ = int(input('''press 0 to exit... and 1 to continue...''') )
    print('''Good Bye...''')
679
1
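For reference, a compact standalone equivalent of the two halves printed above, which makes the expected picture explicit (same convention as the snippet: each star is printed as "* " and each space as a single " "):

n = 3
for i in range(n):                 # upper half: growing rows of stars
    print(" " * (n - i - 1) + "* " * (i + 1))
for i in range(n, 0, -1):          # lower half: shrinking rows of stars
    print(" " * (n - i) + "* " * i)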
import argparse

from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM


def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int ):
    __snake_case : List[str] = AutoConfig.from_pretrained(__UpperCAmelCase )
    __snake_case : List[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__UpperCAmelCase )
    __snake_case : List[str] = checkpoints.load_tax_checkpoint(__UpperCAmelCase )
    __snake_case : List[str] = 'wi_0' in tax_model['target']['encoder']['layers_0']['mlp']

    if config.model_type == "t5":
        __snake_case : Any = 'SelfAttention'
    if config.model_type == "longt5" and config.encoder_attention_type == "local":
        __snake_case : Optional[Any] = 'LocalSelfAttention'
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __snake_case : Union[str, Any] = 'TransientGlobalSelfAttention'
    else:
        raise ValueError(
            'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`'
            ' attribute with a value from [\'local\', \'transient-global].'
        )

    # Encoder
    for layer_index in range(config.num_layers ):
        __snake_case : Optional[int] = F"""layers_{str(__UpperCAmelCase )}"""

        # Self-Attention
        __snake_case : Tuple = tax_model['target']['encoder'][layer_name]['attention']['key']['kernel']
        __snake_case : List[Any] = tax_model['target']['encoder'][layer_name]['attention']['out']['kernel']
        __snake_case : Dict = tax_model['target']['encoder'][layer_name]['attention']['query']['kernel']
        __snake_case : Tuple = tax_model['target']['encoder'][layer_name]['attention']['value']['kernel']

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __snake_case : List[Any] = tax_model['target']['encoder'][layer_name]['attention']['T5LayerNorm_0']['scale']

        # Layer Normalization
        __snake_case : Any = tax_model['target']['encoder'][layer_name]['pre_attention_layer_norm']['scale']

        if split_mlp_wi:
            __snake_case : Union[str, Any] = tax_model['target']['encoder'][layer_name]['mlp']['wi_0']['kernel']
            __snake_case : str = tax_model['target']['encoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            __snake_case : Optional[Any] = tax_model['target']['encoder'][layer_name]['mlp']['wi']['kernel']

        __snake_case : str = tax_model['target']['encoder'][layer_name]['mlp']['wo']['kernel']

        # Layer Normalization
        __snake_case : List[str] = tax_model['target']['encoder'][layer_name]['pre_mlp_layer_norm']['scale']

        # Assigning
        __snake_case : List[Any] = flax_model.params['encoder']['block'][str(__UpperCAmelCase )]['layer']
        __snake_case : Optional[int] = tax_attention_key
        __snake_case : List[Any] = tax_attention_out
        __snake_case : Any = tax_attention_query
        __snake_case : int = tax_attention_value

        __snake_case : List[Any] = tax_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            __snake_case : Tuple = tax_global_layer_norm

        if split_mlp_wi:
            __snake_case : Tuple = tax_mlp_wi_a
            __snake_case : int = tax_mlp_wi_a
        else:
            __snake_case : Optional[int] = tax_mlp_wi

        __snake_case : List[str] = tax_mlp_wo
        __snake_case : List[Any] = tax_mlp_layer_norm

        __snake_case : str = flax_model_encoder_layer_block

    # Only for layer 0:
    __snake_case : Optional[int] = tax_model['target']['encoder']['relpos_bias']['rel_embedding'].T
    __snake_case : List[str] = tax_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        __snake_case : Any = tax_model['target']['encoder']['side_relpos_bias']['rel_embedding'].T
        __snake_case : str = tax_encoder_global_rel_embedding

    # Assigning
    __snake_case : str = tax_model['target']['encoder']['encoder_norm']['scale']
    __snake_case : int = tax_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers ):
        __snake_case : Optional[Any] = F"""layers_{str(__UpperCAmelCase )}"""

        # Self-Attention
        __snake_case : List[str] = tax_model['target']['decoder'][layer_name]['self_attention']['key']['kernel']
        __snake_case : Dict = tax_model['target']['decoder'][layer_name]['self_attention']['out']['kernel']
        __snake_case : List[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['query']['kernel']
        __snake_case : Optional[Any] = tax_model['target']['decoder'][layer_name]['self_attention']['value']['kernel']

        # Layer Normalization
        __snake_case : Union[str, Any] = tax_model['target']['decoder'][layer_name]['pre_self_attention_layer_norm'][
            'scale'
        ]

        # Encoder-Decoder-Attention
        __snake_case : Optional[Any] = tax_model['target']['decoder'][layer_name]['encoder_decoder_attention']
        __snake_case : Optional[int] = tax_enc_dec_attention_module['key']['kernel']
        __snake_case : Optional[int] = tax_enc_dec_attention_module['out']['kernel']
        __snake_case : Union[str, Any] = tax_enc_dec_attention_module['query']['kernel']
        __snake_case : List[str] = tax_enc_dec_attention_module['value']['kernel']

        # Layer Normalization
        __snake_case : Tuple = tax_model['target']['decoder'][layer_name]['pre_cross_attention_layer_norm']['scale']

        # MLP
        if split_mlp_wi:
            __snake_case : Optional[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi_0']['kernel']
            __snake_case : Tuple = tax_model['target']['decoder'][layer_name]['mlp']['wi_1']['kernel']
        else:
            __snake_case : Optional[Any] = tax_model['target']['decoder'][layer_name]['mlp']['wi']['kernel']

        __snake_case : int = tax_model['target']['decoder'][layer_name]['mlp']['wo']['kernel']

        # Layer Normalization
        __snake_case : List[str] = tax_model['target']['decoder'][layer_name]['pre_mlp_layer_norm']['scale']

        # Assigning
        __snake_case : int = flax_model.params['decoder']['block'][str(__UpperCAmelCase )]['layer']
        __snake_case : Any = tax_attention_key
        __snake_case : Union[str, Any] = tax_attention_out
        __snake_case : List[str] = tax_attention_query
        __snake_case : Tuple = tax_attention_value

        __snake_case : List[str] = tax_pre_attention_layer_norm

        __snake_case : Dict = tax_enc_dec_attention_key
        __snake_case : List[str] = tax_enc_dec_attention_out
        __snake_case : Union[str, Any] = tax_enc_dec_attention_query
        __snake_case : Tuple = tax_enc_dec_attention_value

        __snake_case : Union[str, Any] = tax_cross_layer_norm

        if split_mlp_wi:
            __snake_case : str = tax_mlp_wi_a
            __snake_case : List[Any] = tax_mlp_wi_a
        else:
            __snake_case : Dict = tax_mlp_wi

        __snake_case : Dict = tax_mlp_wo

        __snake_case : Optional[Any] = txa_mlp_layer_norm

        __snake_case : Any = flax_model_decoder_layer_block

    # Decoder Normalization
    __snake_case : Dict = tax_model['target']['decoder']['decoder_norm']['scale']
    __snake_case : Union[str, Any] = txa_decoder_norm

    # Only for layer 0:
    __snake_case : List[str] = tax_model['target']['decoder']['relpos_bias']['rel_embedding'].T
    __snake_case : Union[str, Any] = tax_decoder_rel_embedding

    # Token Embeddings
    __snake_case : Optional[Any] = tax_model['target']['token_embedder']['embedding']
    __snake_case : Tuple = txa_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in tax_model["target"]["decoder"]:
        __snake_case : List[Any] = tax_model['target']['decoder']['logits_dense']['kernel']

    flax_model.save_pretrained(__UpperCAmelCase )
    print('T5X Model was sucessfully converted!' )


if __name__ == "__main__":
    __magic_name__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
    )
    parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
    parser.add_argument(
        '''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
    )
    __magic_name__ = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
679
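A hypothetical invocation of the conversion script above; the flag names come from its own argparse setup, while the script filename, checkpoint path, and config name are placeholders, not values the snippet pins down:

import subprocess

subprocess.run(
    [
        "python", "convert_t5x_checkpoint_to_flax.py",  # assumed filename
        "--t5x_checkpoint_path", "/path/to/t5x/checkpoint",
        "--config_name", "google/long-t5-local-base",
        "--flax_dump_folder_path", "./flax_dump",
    ],
    check=True,
)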
from timeit import timeit


def UpperCAmelCase__( __UpperCAmelCase : int ):
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    __snake_case : Dict = 0
    while number:
        number &= number - 1
        result += 1
    return result


def UpperCAmelCase__( __UpperCAmelCase : int ):
    if number < 0:
        raise ValueError('the value of input must not be negative' )
    __snake_case : Tuple = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def UpperCAmelCase__( ):
    def do_benchmark(__UpperCAmelCase : int ) -> None:
        __snake_case : Optional[Any] = 'import __main__ as z'
        print(F"""Benchmark when {number = }:""" )
        print(F"""{get_set_bits_count_using_modulo_operator(__UpperCAmelCase ) = }""" )
        __snake_case : Dict = timeit('z.get_set_bits_count_using_modulo_operator(25)' , setup=__UpperCAmelCase )
        print(F"""timeit() runs in {timing} seconds""" )
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(__UpperCAmelCase ) = }""" )
        __snake_case : Dict = timeit(
            'z.get_set_bits_count_using_brian_kernighans_algorithm(25)' ,
            setup=__UpperCAmelCase ,
        )
        print(F"""timeit() runs in {timing} seconds""" )

    for number in (25, 37, 58, 0):
        do_benchmark(__UpperCAmelCase )
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
679
1
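A standalone illustration of the Brian Kernighan trick benchmarked above: n & (n - 1) clears the lowest set bit, so the loop body runs once per set bit rather than once per bit position:

def popcount(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # 0b11001 -> 0b11000 -> 0b10000 -> 0
        count += 1
    return count

assert popcount(25) == 3  # 25 = 0b11001
assert popcount(0) == 0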
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
    def wrapper(*__UpperCAmelCase : List[str] , **__UpperCAmelCase : int ):
        __snake_case : List[str] = timeit.default_timer()
        __snake_case : Any = func(*__UpperCAmelCase , **__UpperCAmelCase )
        __snake_case : Optional[int] = timeit.default_timer() - starttime
        return delta

    __snake_case : Tuple = func.__name__
    return wrapper


def UpperCAmelCase__( __UpperCAmelCase : dict , __UpperCAmelCase : Optional[Any]=1_00 , __UpperCAmelCase : int=None ):
    __snake_case : str = []
    __snake_case : int = seq_shapes or {}
    for i in range(__UpperCAmelCase ):
        __snake_case : Any = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(__UpperCAmelCase , _ArrayXD ):
                __snake_case : Optional[Any] = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(__UpperCAmelCase , datasets.Value ):
                if v.dtype == "string":
                    __snake_case : str = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    __snake_case : Tuple = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(__UpperCAmelCase , datasets.Sequence ):
                while isinstance(__UpperCAmelCase , datasets.Sequence ):
                    __snake_case : Union[str, Any] = v.feature
                __snake_case : str = seq_shapes[k]
                __snake_case : List[Any] = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
            __snake_case : Optional[int] = data

        dummy_data.append((i, example) )

    return dummy_data


def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int]=1_00 , __UpperCAmelCase : Optional[Any]=None ):
    __snake_case : Optional[Any] = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )

    with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
        for key, record in dummy_data:
            __snake_case : int = features.encode_example(__UpperCAmelCase )
            writer.write(__UpperCAmelCase )

        __snake_case , __snake_case : str = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."""
        )

    __snake_case : Dict = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )

    return dataset
679
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ):
    try:
        __snake_case : Optional[int] = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        __snake_case : Union[str, Any] = default
    else:
        # KEY is set, convert it to True or False.
        try:
            __snake_case : Optional[Any] = strtobool(__UpperCAmelCase )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value


__magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False)
__magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True)
__magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True)

# Compression
__magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
__magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
__magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')

# Audio
__magic_name__ = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)

# Beam
__magic_name__ = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)

# Dill-cloudpickle compatibility
__magic_name__ = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)

# Windows
__magic_name__ = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)


def UpperCAmelCase__( __UpperCAmelCase : Any ):
    try:
        import faiss  # noqa
    except ImportError:
        __snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
    try:
        import regex  # noqa
    except ImportError:
        __snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ):
    try:
        import elasticsearch  # noqa
    except ImportError:
        __snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Dict ):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        __snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
    if not config.TORCH_AVAILABLE:
        __snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Any ):
    if not config.TF_AVAILABLE:
        __snake_case : Optional[Any] = unittest.skip('test requires TensorFlow' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
    if not config.JAX_AVAILABLE:
        __snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
    if not config.PIL_AVAILABLE:
        __snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip('test requires transformers' )(__UpperCAmelCase )
    else:
        return test_case


def UpperCAmelCase__( __UpperCAmelCase : Dict ):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip('test requires tiktoken' )(__UpperCAmelCase )
    else:
        return test_case


def UpperCAmelCase__( __UpperCAmelCase : Tuple ):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip('test requires spacy' )(__UpperCAmelCase )
    else:
        return test_case


def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
    def _require_spacy_model(__UpperCAmelCase : List[str] ):
        try:
            import spacy  # noqa F401

            spacy.load(__UpperCAmelCase )
        except ImportError:
            return unittest.skip('test requires spacy' )(__UpperCAmelCase )
        except OSError:
            return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase )
        else:
            return test_case

    return _require_spacy_model


def UpperCAmelCase__( __UpperCAmelCase : int ):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires pyspark' )(__UpperCAmelCase )
    else:
        return test_case


def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip('test requires joblibspark' )(__UpperCAmelCase )
    else:
        return test_case


def UpperCAmelCase__( __UpperCAmelCase : Any ):
    if not _run_slow_tests or _run_slow_tests == 0:
        __snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : Dict ):
    if not _run_local_tests or _run_local_tests == 0:
        __snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : int ):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        __snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( __UpperCAmelCase : str ):
    if not _run_remote_tests or _run_remote_tests == 0:
        __snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase )
    return test_case


def UpperCAmelCase__( *__UpperCAmelCase : Any ):
    def decorate(cls : List[str] ):
        for name, fn in cls.__dict__.items():
            if callable(__UpperCAmelCase ) and name.startswith('test' ):
                for decorator in decorators:
                    __snake_case : Optional[Any] = decorator(__UpperCAmelCase )
                setattr(cls , __UpperCAmelCase , __UpperCAmelCase )
        return cls

    return decorate


class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    pass


class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = 0
    __UpperCAmelCase = 1
    __UpperCAmelCase = 2


@contextmanager
def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ):
    __snake_case : Optional[Any] = requests.Session().request

    def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ):
        # Change the url to an invalid url so that the connection hangs
        __snake_case : int = 'https://10.255.255.1'
        if kwargs.get('timeout' ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout."""
            )
        __snake_case : str = timeout
        try:
            return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            __snake_case : Any = url
            __snake_case : Union[str, Any] = e.args[0]
            __snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
            __snake_case : str = (max_retry_error,)
            raise

    def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ):
        raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('requests.Session.send' , __UpperCAmelCase ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('requests.Session.request' , __UpperCAmelCase ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ):
            yield
    else:
        raise ValueError('Please use a value from the OfflineSimulationMode enum.' )


@contextmanager
def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ):
    __snake_case : Dict = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir:
        try:
            os.chdir(__UpperCAmelCase )
            yield
        finally:
            os.chdir(__UpperCAmelCase )


@contextmanager
def UpperCAmelCase__( ):
    import gc

    gc.collect()
    __snake_case : Any = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def UpperCAmelCase__( ):
    import gc

    gc.collect()
    __snake_case : int = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ):
    return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist()


def UpperCAmelCase__( __UpperCAmelCase : List[str] ):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ):
        try:
            return func(*__UpperCAmelCase , **__UpperCAmelCase )
        except HTTPError as err:
            if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ):
                pytest.xfail(str(__UpperCAmelCase ) )
            raise err

    return decorator.decorator(_wrapper , __UpperCAmelCase )


class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : int = returncode
        __snake_case : Tuple = stdout
        __snake_case : List[Any] = stderr


async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ):
    while True:
        __snake_case : Optional[int] = await stream.readline()
        if line:
            callback(__UpperCAmelCase )
        else:
            break


async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ):
    if echo:
        print('\nRunning: ' , ' '.join(__UpperCAmelCase ) )

    __snake_case : Tuple = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    __snake_case : Any = []
    __snake_case : Tuple = []

    def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ):
        __snake_case : int = line.decode('utf-8' ).rstrip()
        sink.append(__UpperCAmelCase )
        if not quiet:
            print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ),
            _read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ),
        ] ,
        timeout=__UpperCAmelCase ,
    )
    return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase )


def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ):
    __snake_case : Any = asyncio.get_event_loop()
    __snake_case : List[str] = loop.run_until_complete(
        _stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) )

    __snake_case : Dict = ' '.join(__UpperCAmelCase )
    if result.returncode > 0:
        __snake_case : List[Any] = '\n'.join(result.stderr )
        raise RuntimeError(
            F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            F"""The combined stderr from workers follows:\n{stderr}"""
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(F"""'{cmd_str}' produced no output.""" )

    return result


def UpperCAmelCase__( ):
    __snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
    __snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M )
    return int(__UpperCAmelCase )


def UpperCAmelCase__( ):
    __snake_case : Dict = 2_95_00
    __snake_case : Optional[int] = pytest_xdist_worker_id()
    return port + uniq_delta
679
1
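A sketch of how the offline-simulation context manager defined in the snippet above is typically used in a test. The helper and its mode enum are the ones from that snippet, shown here with their conventional (un-mangled) names offline and OfflineSimulationMode, which is an assumption about the original identifiers:

import pytest
import requests

def test_network_calls_fail_offline():
    # patches requests.Session.send, so any HTTP call raises ConnectionError
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")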
def UpperCAmelCase__( __UpperCAmelCase : int ):
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
679
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

__magic_name__ = TypeVar('''T''')


class __SCREAMING_SNAKE_CASE ( Generic[T]):
    """simple docstring"""

    def __init__( self , _UpperCAmelCase ):
        __snake_case : Optional[Any] = data
        __snake_case : Node[T] | None = None

    def __str__( self ):
        return F"""{self.data}"""


class __SCREAMING_SNAKE_CASE ( Generic[T]):
    """simple docstring"""

    def __init__( self ):
        __snake_case : Node[T] | None = None

    def __iter__( self ):
        __snake_case : List[str] = self.top
        while node:
            yield node.data
            __snake_case : Union[str, Any] = node.next

    def __str__( self ):
        return "->".join([str(_UpperCAmelCase ) for item in self] )

    def __len__( self ):
        return len(tuple(iter(self ) ) )

    def lowercase_ ( self ):
        return self.top is None

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Any = Node(_UpperCAmelCase )
        if not self.is_empty():
            __snake_case : Any = self.top
        __snake_case : Dict = node

    def lowercase_ ( self ):
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , _UpperCAmelCase )
        __snake_case : Optional[int] = self.top
        __snake_case : Dict = self.top.next
        return pop_node.data

    def lowercase_ ( self ):
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data

    def lowercase_ ( self ):
        __snake_case : Optional[int] = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
679
1
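A quick usage sketch for the linked-list stack above. The class and method names (Stack, push, pop, peek) are written out for readability, since the snippet mangles them; they are assumptions about the original identifiers, not names the snippet itself exposes:

stack = Stack[int]()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == "3->2->1"  # __str__ walks from the top down
assert stack.pop() == 3
assert stack.peek() == 2
assert len(stack) == 2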
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch

if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""

    def lowercase_ ( self ):
        __snake_case : Any = 'ZinengTang/tvlt-base'
        __snake_case : List[str] = tempfile.mkdtemp()

    def lowercase_ ( self , **_UpperCAmelCase ):
        return TvltImageProcessor.from_pretrained(self.checkpoint , **_UpperCAmelCase )

    def lowercase_ ( self , **_UpperCAmelCase ):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_UpperCAmelCase )

    def lowercase_ ( self ):
        shutil.rmtree(self.tmpdirname )

    def lowercase_ ( self ):
        __snake_case : Dict = self.get_image_processor()
        __snake_case : Optional[int] = self.get_feature_extractor()
        __snake_case : Dict = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        __snake_case : Union[str, Any] = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase )
        self.assertIsInstance(processor.image_processor , _UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : Any = self.get_image_processor()
        __snake_case : Optional[int] = self.get_feature_extractor()
        __snake_case : Optional[Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
        __snake_case : Any = np.ones([12_000] )
        __snake_case : str = feature_extractor(_UpperCAmelCase , return_tensors='np' )
        __snake_case : Any = processor(audio=_UpperCAmelCase , return_tensors='np' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.get_image_processor()
        __snake_case : int = self.get_feature_extractor()
        __snake_case : List[str] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
        __snake_case : Dict = np.ones([3, 224, 224] )
        __snake_case : Optional[Any] = image_processor(_UpperCAmelCase , return_tensors='np' )
        __snake_case : Tuple = processor(images=_UpperCAmelCase , return_tensors='np' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def lowercase_ ( self ):
        __snake_case : str = self.get_image_processor()
        __snake_case : int = self.get_feature_extractor()
        __snake_case : List[Any] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
        __snake_case : Optional[int] = np.ones([12_000] )
        __snake_case : Any = np.ones([3, 224, 224] )
        __snake_case : Any = processor(audio=_UpperCAmelCase , images=_UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
        # test if it raises when no input is passed
        with pytest.raises(_UpperCAmelCase ):
            processor()

    def lowercase_ ( self ):
        __snake_case : int = self.get_image_processor()
        __snake_case : Any = self.get_feature_extractor()
        __snake_case : List[str] = TvltProcessor(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
        self.assertListEqual(
            processor.model_input_names ,
            image_processor.model_input_names + feature_extractor.model_input_names ,
            msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' ,
        )
679
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
    """simple docstring"""

    __UpperCAmelCase = ShapEPipeline
    __UpperCAmelCase = ["prompt"]
    __UpperCAmelCase = ["prompt"]
    __UpperCAmelCase = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    __UpperCAmelCase = False

    @property
    def lowercase_ ( self ):
        return 32

    @property
    def lowercase_ ( self ):
        return 32

    @property
    def lowercase_ ( self ):
        return self.time_input_dim * 4

    @property
    def lowercase_ ( self ):
        return 8

    @property
    def lowercase_ ( self ):
        __snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def lowercase_ ( self ):
        torch.manual_seed(0 )
        __snake_case : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModelWithProjection(_UpperCAmelCase )

    @property
    def lowercase_ ( self ):
        torch.manual_seed(0 )
        __snake_case : Any = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        __snake_case : Dict = PriorTransformer(**_UpperCAmelCase )
        return model

    @property
    def lowercase_ ( self ):
        torch.manual_seed(0 )
        __snake_case : Tuple = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        __snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase )
        return model

    def lowercase_ ( self ):
        __snake_case : Tuple = self.dummy_prior
        __snake_case : Dict = self.dummy_text_encoder
        __snake_case : Optional[int] = self.dummy_tokenizer
        __snake_case : str = self.dummy_renderer

        __snake_case : Tuple = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
        __snake_case : Optional[int] = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }

        return components

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
        if str(_UpperCAmelCase ).startswith('mps' ):
            __snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
        else:
            __snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
        __snake_case : Tuple = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs

    def lowercase_ ( self ):
        __snake_case : Optional[int] = 'cpu'

        __snake_case : Tuple = self.get_dummy_components()

        __snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase )
        __snake_case : Any = pipe.to(_UpperCAmelCase )

        pipe.set_progress_bar_config(disable=_UpperCAmelCase )

        __snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
        __snake_case : Union[str, Any] = output.images[0]
        __snake_case : Tuple = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        __snake_case : Dict = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def lowercase_ ( self ):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def lowercase_ ( self ):
        __snake_case : List[str] = torch_device == 'cpu'
        __snake_case : int = True

        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )

    def lowercase_ ( self ):
        __snake_case : Dict = self.get_dummy_components()
        __snake_case : Any = self.pipeline_class(**_UpperCAmelCase )
        __snake_case : Tuple = pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )

        __snake_case : int = 1
        __snake_case : Optional[int] = 2

        __snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase )

        for key in inputs.keys():
            if key in self.batch_params:
                __snake_case : Union[str, Any] = batch_size * [inputs[key]]

        __snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""

    def lowercase_ ( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self ):
        __snake_case : str = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy'
        )
        __snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' )
        __snake_case : List[str] = pipe.to(_UpperCAmelCase )
        pipe.set_progress_bar_config(disable=_UpperCAmelCase )

        __snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )

        __snake_case : Optional[Any] = pipe(
            'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
679
1
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

__magic_name__ = logging.get_logger(__name__)


@add_end_docstrings(UpperCamelCase)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ):
        super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
        self.check_model_type(_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ):
        __snake_case , __snake_case : List[Any] = {}, {}
        if padding is not None:
            __snake_case : List[Any] = padding
        if truncation is not None:
            __snake_case : Optional[int] = truncation
        if top_k is not None:
            __snake_case : Optional[Any] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__( self , _UpperCAmelCase , _UpperCAmelCase = None , **_UpperCAmelCase ):
        if isinstance(_UpperCAmelCase , (Image.Image, str) ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
            __snake_case : List[Any] = {'image': image, 'question': question}
        else:
            __snake_case : List[str] = image
        __snake_case : Tuple = super().__call__(_UpperCAmelCase , **_UpperCAmelCase )
        return results

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False ):
        __snake_case : Union[str, Any] = load_image(inputs['image'] )
        __snake_case : List[Any] = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_UpperCAmelCase , truncation=_UpperCAmelCase )
        __snake_case : Tuple = self.image_processor(images=_UpperCAmelCase , return_tensors=self.framework )
        model_inputs.update(_UpperCAmelCase )
        return model_inputs

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Union[str, Any] = self.model(**_UpperCAmelCase )
        return model_outputs

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=5 ):
        if top_k > self.model.config.num_labels:
            __snake_case : Tuple = self.model.config.num_labels

        if self.framework == "pt":
            __snake_case : Tuple = model_outputs.logits.sigmoid()[0]
            __snake_case , __snake_case : Optional[int] = probs.topk(_UpperCAmelCase )
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )

        __snake_case : int = scores.tolist()
        __snake_case : Any = ids.tolist()
        return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCAmelCase , _UpperCAmelCase )]
679
import argparse

from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging

logging.set_verbosity_info()


def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : Any ):
    # Initialise PyTorch model
    __snake_case : List[str] = TaConfig.from_json_file(__UpperCAmelCase )
    print(F"""Building PyTorch model from configuration: {config}""" )
    __snake_case : int = TaForConditionalGeneration(__UpperCAmelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(__UpperCAmelCase )


if __name__ == "__main__":
    __magic_name__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    __magic_name__ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
679
1
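A hedged end-to-end example of the visual-question-answering pipeline class defined earlier in this row, driven through the transformers pipeline factory; the checkpoint name is one public VQA model chosen for illustration, not something the snippet itself pins:

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(
    image="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png",
    question="What is she wearing?",
    top_k=2,
)
print(preds)  # [{"score": ..., "answer": ...}, ...]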
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""

    def __init__( self , _UpperCAmelCase = "" , _UpperCAmelCase = False ):
        # Mapping from the first character of the prefix of the node
        __snake_case : dict[str, RadixNode] = {}

        # A node will be a leaf if the tree contains its word
        __snake_case : str = is_leaf
        __snake_case : str = prefix

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Any = 0
        for q, w in zip(self.prefix , _UpperCAmelCase ):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def lowercase_ ( self , _UpperCAmelCase ):
        for word in words:
            self.insert(_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            __snake_case : str = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            __snake_case : int = RadixNode(prefix=_UpperCAmelCase , is_leaf=_UpperCAmelCase )
        else:
            __snake_case : Dict = self.nodes[word[0]]
            __snake_case , __snake_case , __snake_case : str = incoming_node.match(
                _UpperCAmelCase )

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(_UpperCAmelCase )
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                __snake_case : int = remaining_prefix
                __snake_case : Optional[int] = self.nodes[matching_string[0]]
                __snake_case : int = RadixNode(_UpperCAmelCase , _UpperCAmelCase )
                __snake_case : Optional[int] = aux_node

                if remaining_word == "":
                    __snake_case : Tuple = True
                else:
                    self.nodes[matching_string[0]].insert(_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Optional[int] = self.nodes.get(word[0] , _UpperCAmelCase )
        if not incoming_node:
            return False
        else:
            __snake_case , __snake_case , __snake_case : int = incoming_node.match(
                _UpperCAmelCase )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Union[str, Any] = self.nodes.get(word[0] , _UpperCAmelCase )
        if not incoming_node:
            return False
        else:
            __snake_case , __snake_case , __snake_case : Any = incoming_node.match(
                _UpperCAmelCase )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(_UpperCAmelCase )
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes ) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes ) == 1 and not self.is_leaf:
                            __snake_case : int = list(self.nodes.values() )[0]
                            __snake_case : Optional[Any] = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            __snake_case : Any = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes ) > 1:
                        __snake_case : Union[str, Any] = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        __snake_case : List[str] = list(incoming_node.nodes.values() )[0]
                        __snake_case : List[Any] = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        __snake_case : Tuple = merging_node.nodes

                    return True

    def lowercase_ ( self , _UpperCAmelCase = 0 ):
        if self.prefix != "":
            print('-' * height , self.prefix , ' (leaf)' if self.is_leaf else '' )

        for value in self.nodes.values():
            value.print_tree(height + 1 )


def UpperCAmelCase__( ):
    __snake_case : Optional[int] = 'banana bananas bandana band apple all beast'.split()
    __snake_case : Tuple = RadixNode()
    root.insert_many(__UpperCAmelCase )

    assert all(root.find(__UpperCAmelCase ) for word in words )
    assert not root.find('bandanas' )
    assert not root.find('apps' )
    root.delete('all' )
    assert not root.find('all' )
    root.delete('banana' )
    assert not root.find('banana' )
    assert root.find('bananas' )

    return True


def UpperCAmelCase__( ):
    assert test_trie()


def UpperCAmelCase__( ):
    __snake_case : Optional[Any] = RadixNode()
    __snake_case : Optional[int] = 'banana bananas bandanas bandana band apple all beast'.split()
    root.insert_many(__UpperCAmelCase )

    print('Words:' , __UpperCAmelCase )
    print('Tree:' )
    root.print_tree()


if __name__ == "__main__":
    main()
679
import logging

import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEncoder,
    BertModel,
    BertPreTrainedModel,
)

__magic_name__ = logging.getLogger(__name__)


class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ):
        __snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] )
        __snake_case : Optional[Any] = layer_outputs[0]
        return hidden_states


@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." ,
    UpperCamelCase ,
)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    def __init__( self , _UpperCAmelCase ):
        super().__init__(_UpperCAmelCase )

        __snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase )
        self.init_weights()
        __snake_case : str = 0
        __snake_case : List[str] = 0
        __snake_case : int = 0
        __snake_case : Tuple = 0

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Dict = threshold

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : List[Any] = patience

    def lowercase_ ( self ):
        __snake_case : Dict = 0
        __snake_case : Dict = 0

    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num
        __snake_case : int = (
            F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
            F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
        )
        print(_UpperCAmelCase )

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
        elif input_ids is not None:
            __snake_case : Union[str, Any] = input_ids.size()
        elif inputs_embeds is not None:
            __snake_case : int = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds' )

        __snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            __snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
        if token_type_ids is None:
            __snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase )

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        __snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            __snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size()
            __snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                __snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase )
            __snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase )
        else:
            __snake_case : str = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        __snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers )

        __snake_case : Any = self.embeddings(
            input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase )
        __snake_case : List[str] = embedding_output

        if self.training:
            __snake_case : Dict = []
            for i in range(self.config.num_hidden_layers ):
                __snake_case : str = self.encoder.adaptive_forward(
                    _UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )

                __snake_case : Optional[Any] = self.pooler(_UpperCAmelCase )
                __snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) )
                res.append(_UpperCAmelCase )
        elif self.patience == 0:  # Use all layers for inference
            __snake_case : Dict = self.encoder(
                _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , )
            __snake_case : str = self.pooler(encoder_outputs[0] )
            __snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )]
        else:
            __snake_case : List[str] = 0
            __snake_case : str = None
            __snake_case : Tuple = 0
            for i in range(self.config.num_hidden_layers ):
                calculated_layer_num += 1
                __snake_case : List[Any] = self.encoder.adaptive_forward(
                    _UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase )

                __snake_case : Any = self.pooler(_UpperCAmelCase )
                __snake_case : int = output_layers[i](_UpperCAmelCase )
                if regression:
                    __snake_case : Optional[int] = logits.detach()
                    if patient_result is not None:
                        __snake_case : Dict = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        __snake_case : Any = 0
                else:
                    __snake_case : str = logits.detach().argmax(dim=1 )
                    if patient_result is not None:
                        __snake_case : List[str] = patient_result.detach().argmax(dim=1 )
                    if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ):
                        patient_counter += 1
                    else:
                        __snake_case : Dict = 0

                __snake_case : str = logits
                if patient_counter == self.patience:
                    break
            __snake_case : str = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1

        return res


@add_start_docstrings(
    "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n    the pooled output) e.g. for GLUE tasks. " ,
    UpperCamelCase ,
)
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""

    def __init__( self , _UpperCAmelCase ):
        super().__init__(_UpperCAmelCase )
        __snake_case : List[str] = config.num_labels
        __snake_case : Dict = BertModelWithPabee(_UpperCAmelCase )
        __snake_case : int = nn.Dropout(config.hidden_dropout_prob )
        __snake_case : Optional[int] = nn.ModuleList(
            [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )

        self.init_weights()

    @add_start_docstrings_to_model_forward(_UpperCAmelCase )
    def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
        __snake_case : List[str] = self.bert(
            input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )

        __snake_case : int = (logits[-1],)

        if labels is not None:
            __snake_case : List[Any] = None
            __snake_case : Optional[int] = 0
            for ix, logits_item in enumerate(_UpperCAmelCase ):
                if self.num_labels == 1:
                    # We are doing regression
                    __snake_case : List[str] = MSELoss()
                    __snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
                else:
                    __snake_case : List[str] = CrossEntropyLoss()
                    __snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
                if total_loss is None:
                    __snake_case : List[Any] = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            __snake_case : int = (total_loss / total_weights,) + outputs

        return outputs
679
1
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple ): # Return True if there is node that has not iterated. __snake_case : List[Any] = [False] * len(__UpperCAmelCase ) __snake_case : Union[str, Any] = [] queue.append(__UpperCAmelCase ) __snake_case : int = True while queue: __snake_case : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(__UpperCAmelCase ) __snake_case : str = True __snake_case : Any = u return visited[t] def UpperCAmelCase__( __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[int] ): # This array is filled by BFS and to store path __snake_case : List[str] = [-1] * (len(__UpperCAmelCase )) __snake_case : List[str] = 0 while bfs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): __snake_case : List[str] = float('Inf' ) __snake_case : Optional[Any] = sink while s != source: # Find the minimum value in select path __snake_case : str = min(__UpperCAmelCase , graph[parent[s]][s] ) __snake_case : Optional[Any] = parent[s] max_flow += path_flow __snake_case : List[str] = sink while v != source: __snake_case : str = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow __snake_case : str = parent[v] return max_flow __magic_name__ = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __magic_name__ , __magic_name__ = 0, 5 print(ford_fulkerson(graph, source, sink))
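Since the flattened dump above is awkward to run as-is, here is a readable sketch of the same algorithm it implements, Edmonds-Karp (Ford-Fulkerson with BFS augmenting paths). For the 6-node capacity matrix used above, the maximum flow from node 0 to node 5 should come out to 23, the classic CLRS answer.

from collections import deque


def bfs(graph, source, sink, parent):
    # Find an augmenting path in the residual graph; record it in `parent`.
    visited = [False] * len(graph)
    queue = deque([source])
    visited[source] = True
    while queue:
        u = queue.popleft()
        for v, capacity in enumerate(graph[u]):
            if not visited[v] and capacity > 0:
                visited[v] = True
                parent[v] = u
                queue.append(v)
    return visited[sink]


def ford_fulkerson(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        # Bottleneck capacity along the path BFS just found.
        path_flow = float('inf')
        v = sink
        while v != source:
            path_flow = min(path_flow, graph[parent[v]][v])
            v = parent[v]
        max_flow += path_flow
        # Update residual capacities along the path (and reverse edges).
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = u
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(ford_fulkerson(graph, 0, 5))  # expected: 23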
679
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input('Enter a string ').strip()

    isogram = is_isogram(input_str)
    print(f'{input_str} is {"an" if isogram else "not an"} isogram.')
679
1
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = BlenderbotConfig __UpperCAmelCase = {} __UpperCAmelCase = "gelu" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , ): __snake_case : List[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : Any = seq_length __snake_case : Any = is_training __snake_case : Any = use_labels __snake_case : str = vocab_size __snake_case : Optional[Any] = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : Union[str, Any] = num_attention_heads __snake_case : str = intermediate_size __snake_case : int = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : List[str] = max_position_embeddings __snake_case : List[str] = eos_token_id __snake_case : Tuple = pad_token_id __snake_case : int = bos_token_id def lowercase_ ( self ): __snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __snake_case : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __snake_case : Dict = tf.concat([input_ids, eos_tensor] , axis=1 ) __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __snake_case : List[str] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : int = TFBlenderbotModel(config=_UpperCAmelCase ).get_decoder() __snake_case : Union[str, Any] = inputs_dict['input_ids'] __snake_case : List[Any] = input_ids[:1, :] __snake_case : List[Any] = inputs_dict['attention_mask'][:1, :] __snake_case : Optional[Any] = inputs_dict['head_mask'] __snake_case : int = 1 # first forward pass __snake_case : Union[str, Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) __snake_case , __snake_case : List[str] = outputs.to_tuple() # create hypothetical next token 
and extent to next_input_ids __snake_case : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) __snake_case : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __snake_case : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] __snake_case : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __snake_case : List[str] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx] __snake_case : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-3 ) def UpperCAmelCase__( __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Union[str, Any]=None , ): if attention_mask is None: __snake_case : Tuple = tf.cast(tf.math.not_equal(__UpperCAmelCase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: __snake_case : List[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: __snake_case : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __snake_case : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () __UpperCAmelCase = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () __UpperCAmelCase = ( { "conversational": TFBlenderbotForConditionalGeneration, "feature-extraction": TFBlenderbotModel, "summarization": TFBlenderbotForConditionalGeneration, "text2text-generation": TFBlenderbotForConditionalGeneration, "translation": TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) __UpperCAmelCase = True __UpperCAmelCase = False __UpperCAmelCase = False def lowercase_ ( self ): __snake_case : Optional[Any] = TFBlenderbotModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase ) def lowercase_ ( self ): self.config_tester.run_common_tests() def lowercase_ ( self ): __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_tokenizers @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase): """simple docstring""" 
__UpperCAmelCase = ["My friends are cool but they eat too many carbs."] __UpperCAmelCase = "facebook/blenderbot-400M-distill" @cached_property def lowercase_ ( self ): return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def lowercase_ ( self ): __snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowercase_ ( self ): __snake_case : Optional[Any] = self.tokenizer(self.src_text , return_tensors='tf' ) __snake_case : List[Any] = self.model.generate( model_inputs.input_ids , ) __snake_case : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_UpperCAmelCase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
679
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = 'retribert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
679
1
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar('T')


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
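A quick usage sketch of the stack above, using the method names as restored here:

stack = LinkedStack()
for value in (1, 2, 3):
    stack.push(value)

print(stack)         # 3->2->1
print(stack.peek())  # 3
print(stack.pop())   # 3
print(len(stack))    # 2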
679
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
    'tokenization_biogpt': ['BioGptTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_biogpt'] = [
        'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BioGptForCausalLM',
        'BioGptForTokenClassification',
        'BioGptForSequenceClassification',
        'BioGptModel',
        'BioGptPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
679
1
from __future__ import annotations


def get_demo_graph(index):
    return [
        {
            0: [1, 2],
            1: [0, 2],
            2: [0, 1, 3, 5],
            3: [2, 4],
            4: [3],
            5: [2, 6, 8],
            6: [5, 7],
            7: [6, 8],
            8: [5, 7],
        },
        {
            0: [6],
            1: [9],
            2: [4, 5],
            3: [4],
            4: [2, 3],
            5: [2],
            6: [0, 7],
            7: [6],
            8: [],
            9: [1],
        },
        {
            0: [4],
            1: [6],
            2: [],
            3: [5, 6, 7],
            4: [0, 6],
            5: [3, 8, 9],
            6: [1, 3, 4, 7],
            7: [3, 6, 8, 9],
            8: [5, 7],
            9: [5, 7],
        },
        {
            0: [1, 3],
            1: [0, 2, 4],
            2: [1, 3, 4],
            3: [0, 2, 4],
            4: [1, 2, 3],
        },
    ][index]


def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges


if __name__ == "__main__":
    import doctest

    doctest.testmod()
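As a sanity check on the bridge finder above: in demo graph 0, nodes 0-1-2 form a triangle and 5-6-7-8 form a cycle, so only the edges connecting those clusters to each other and to node 4 can be bridges.

print(compute_bridges(get_demo_graph(0)))
# [(3, 4), (2, 3), (2, 5)] -- order depends on the DFS traversal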
679
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" def lowercase_ ( self ): __snake_case : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) ) class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ): __snake_case : List[str] = parent __snake_case : Tuple = batch_size __snake_case : str = image_size __snake_case : Union[str, Any] = patch_size __snake_case : Optional[int] = num_channels __snake_case : List[str] = last_hidden_size __snake_case : Optional[Any] = num_attention_heads __snake_case : Dict = hidden_act __snake_case : List[Any] = conv_kernel_size __snake_case : int = output_stride __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Dict = attention_probs_dropout_prob __snake_case : Any = classifier_dropout_prob __snake_case : str = use_labels __snake_case : Optional[Any] = is_training __snake_case : Dict = num_labels __snake_case : str = initializer_range __snake_case : Union[str, Any] = scope def lowercase_ ( self ): __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : str = None __snake_case : Dict = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ ( self ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() 
__snake_case : List[Any] = model(_UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : Tuple = self.num_labels __snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : Optional[Any] = self.num_labels __snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __snake_case : Tuple = model(_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ ( self ): __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs __snake_case : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) __UpperCAmelCase = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def lowercase_ ( self ): __snake_case : Dict = MobileViTModelTester(self ) __snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def lowercase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def lowercase_ ( self ): pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def lowercase_ ( self ): pass @unittest.skip(reason='MobileViT does not output attentions' ) def lowercase_ ( self ): pass def lowercase_ ( self ): __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Tuple = model_class(_UpperCAmelCase ) __snake_case : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : List[str] = [*signature.parameters.keys()] __snake_case : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self ): pass def lowercase_ ( self ): __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowercase_ ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : str = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): __snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __snake_case : Optional[Any] = outputs.hidden_states __snake_case : str = 5 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __snake_case : Optional[Any] = 2 for i in range(len(_UpperCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Tuple = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase ) @slow def lowercase_ ( self ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def UpperCAmelCase__( ): __snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): """simple docstring""" @cached_property def lowercase_ ( self ): return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def lowercase_ ( self ): __snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : str = prepare_img() __snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __snake_case : Tuple = model(**_UpperCAmelCase ) # verify the logits __snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowercase_ ( self ): __snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : str = model.to(_UpperCAmelCase ) __snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : 
Optional[int] = prepare_img() __snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __snake_case : int = model(**_UpperCAmelCase ) __snake_case : int = outputs.logits # verify the logits __snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _UpperCAmelCase ) __snake_case : Optional[int] = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=_UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowercase_ ( self ): __snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : str = model.to(_UpperCAmelCase ) __snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : Any = prepare_img() __snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __snake_case : Optional[Any] = model(**_UpperCAmelCase ) __snake_case : str = outputs.logits.detach().cpu() __snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] ) __snake_case : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _UpperCAmelCase ) __snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase ) __snake_case : List[str] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
679
1
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __magic_name__ = { '''vocab_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json''' }, '''merges_file''': { '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt''' }, '''tokenizer_config_file''': { '''facebook/blenderbot_small-90M''': ( '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json''' ) }, } __magic_name__ = {'''facebook/blenderbot_small-90M''': 512} def UpperCAmelCase__( __UpperCAmelCase : str ): __snake_case : Optional[Any] = set() __snake_case : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __snake_case : List[Any] = char __snake_case : Dict = set(__UpperCAmelCase ) return pairs class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = ["input_ids", "attention_mask"] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="__start__" , _UpperCAmelCase="__end__" , _UpperCAmelCase="__unk__" , _UpperCAmelCase="__null__" , **_UpperCAmelCase , ): super().__init__(unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , **_UpperCAmelCase ) with open(_UpperCAmelCase , encoding='utf-8' ) as vocab_handle: __snake_case : List[Any] = json.load(_UpperCAmelCase ) __snake_case : Tuple = {v: k for k, v in self.encoder.items()} with open(_UpperCAmelCase , encoding='utf-8' ) as merges_handle: __snake_case : List[Any] = merges_handle.read().split('\n' )[1:-1] __snake_case : List[Any] = [tuple(merge.split() ) for merge in merges] __snake_case : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __snake_case : int = {} @property def lowercase_ ( self ): return len(self.encoder ) def lowercase_ ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def lowercase_ ( self , _UpperCAmelCase ): if token in self.cache: return self.cache[token] __snake_case : Tuple = re.sub('([.,!?()])' , R' \1' , _UpperCAmelCase ) __snake_case : List[str] = re.sub('(\')' , R' \1 ' , _UpperCAmelCase ) __snake_case : Optional[Any] = re.sub(R'\s{2,}' , ' ' , _UpperCAmelCase ) if "\n" in token: __snake_case : int = token.replace('\n' , ' __newln__' ) __snake_case : Optional[int] = token.split(' ' ) __snake_case : Tuple = [] for token in tokens: if not len(_UpperCAmelCase ): continue __snake_case : List[str] = token.lower() __snake_case : Optional[Any] = tuple(_UpperCAmelCase ) __snake_case : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + '</w>'] ) __snake_case : Optional[Any] = get_pairs(_UpperCAmelCase ) if not pairs: words.append(_UpperCAmelCase ) continue while True: __snake_case : Union[str, Any] = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float('inf' ) ) ) if bigram not in self.bpe_ranks: break __snake_case , __snake_case : List[Any] = bigram __snake_case : Optional[int] = [] __snake_case : Optional[int] = 0 while i < 
len(_UpperCAmelCase ): try: __snake_case : List[Any] = word.index(_UpperCAmelCase , _UpperCAmelCase ) new_word.extend(word[i:j] ) __snake_case : Optional[Any] = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __snake_case : int = tuple(_UpperCAmelCase ) __snake_case : Dict = new_word if len(_UpperCAmelCase ) == 1: break else: __snake_case : Dict = get_pairs(_UpperCAmelCase ) __snake_case : Union[str, Any] = '@@ '.join(_UpperCAmelCase ) __snake_case : Optional[int] = word[:-4] __snake_case : Union[str, Any] = word words.append(_UpperCAmelCase ) return " ".join(_UpperCAmelCase ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : Any = [] __snake_case : int = re.findall(R'\S+\n?' , _UpperCAmelCase ) for token in words: split_tokens.extend(list(self.bpe(_UpperCAmelCase ).split(' ' ) ) ) return split_tokens def lowercase_ ( self , _UpperCAmelCase ): __snake_case : Dict = token.lower() return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowercase_ ( self , _UpperCAmelCase ): return self.decoder.get(_UpperCAmelCase , self.unk_token ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : str = ' '.join(_UpperCAmelCase ).replace('@@ ' , '' ).strip() return out_string def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __snake_case : int = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __snake_case : str = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + '\n' ) __snake_case : str = 0 with open(_UpperCAmelCase , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' ) __snake_case : Union[str, Any] = token_index writer.write(' '.join(_UpperCAmelCase ) + '\n' ) index += 1 return vocab_file, merge_file
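The heart of the tokenizer above is the BPE loop: repeatedly find the adjacent symbol pair with the lowest merge rank and fuse it, until no learned merge applies. A compact, self-contained sketch of just that loop, with a toy merge table (the ranks and the sample word are hypothetical):

def bpe_merge(word, bpe_ranks):
    # word: sequence of symbols, e.g. ('h', 'e', 'l', 'l', 'o</w>')
    word = list(word)
    while len(word) > 1:
        pairs = [(word[i], word[i + 1]) for i in range(len(word) - 1)]
        # Pick the pair that was learned earliest (lowest rank).
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float('inf')))
        if bigram not in bpe_ranks:
            break  # no known merge applies any more
        first, second = bigram
        merged = []
        i = 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = merged
    return word


ranks = {('l', 'l'): 0, ('h', 'e'): 1, ('he', 'll'): 2}
print(bpe_merge(('h', 'e', 'l', 'l', 'o</w>'), ranks))  # ['hell', 'o</w>']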
679
from __future__ import annotations


def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError('Please enter a valid number')

    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split('.')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
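As a cross-check on the conversion above: 6.25 has two fractional digits, so it first becomes 625/100, and Euclid's algorithm gives gcd(625, 100) = 25, reducing the fraction to 25/4. The standard library reaches the same result; `Fraction('6.25')` parses the decimal string exactly, which is the usual guard against binary floating-point surprises:

from fractions import Fraction
from math import gcd

print(gcd(625, 100))         # 25
print(625 // 25, 100 // 25)  # 25 4
print(Fraction('6.25'))      # 25/4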
679
1
from __future__ import annotations


def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError('Please enter a valid number')

    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split('.')[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
679
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') __magic_name__ = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __UpperCAmelCase = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def lowercase_ ( self ): if self.train_file is not None: __snake_case : Union[str, Any] = self.train_file.split('.' 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __snake_case : List[str] = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = True __UpperCAmelCase = None __UpperCAmelCase = None def __call__( self , _UpperCAmelCase ): __snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels' __snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features] __snake_case : List[Any] = len(_UpperCAmelCase ) __snake_case : Union[str, Any] = len(features[0]['input_ids'] ) __snake_case : Union[str, Any] = [ [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features ] __snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) ) __snake_case : Optional[Any] = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten __snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()} # Add back labels __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) return batch def UpperCAmelCase__( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __snake_case : Tuple = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) datasets.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
__snake_case : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __snake_case : Optional[int] = {} if data_args.train_file is not None: __snake_case : Optional[int] = data_args.train_file if data_args.validation_file is not None: __snake_case : int = data_args.validation_file __snake_case : int = data_args.train_file.split('.' )[-1] __snake_case : Tuple = load_dataset( __UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __snake_case : Optional[int] = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
__snake_case : str = [F"""ending{i}""" for i in range(4 )] __snake_case : Optional[Any] = 'sent1' __snake_case : Tuple = 'sent2' if data_args.max_seq_length is None: __snake_case : List[Any] = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.' ) __snake_case : List[Any] = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(__UpperCAmelCase : Tuple ): __snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]] __snake_case : Union[str, Any] = examples[question_header_name] __snake_case : Optional[int] = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase ) ] # Flatten out __snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) ) __snake_case : int = list(chain(*__UpperCAmelCase ) ) # Tokenize __snake_case : Tuple = tokenizer( __UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) __snake_case : Optional[Any] = raw_datasets['train'] if data_args.max_train_samples is not None: __snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples ) __snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): __snake_case : int = train_dataset.map( __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) __snake_case : Optional[Any] = raw_datasets['validation'] if data_args.max_eval_samples is not None: __snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples ) __snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): __snake_case : List[Any] = eval_dataset.map( __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __snake_case : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(__UpperCAmelCase : int ): __snake_case , __snake_case : Union[str, Any] = eval_predictions __snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our 
Trainer __snake_case : List[str] = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , ) # Training if training_args.do_train: __snake_case : Dict = None if training_args.resume_from_checkpoint is not None: __snake_case : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: __snake_case : List[str] = last_checkpoint __snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __snake_case : List[Any] = train_result.metrics __snake_case : Optional[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase ) ) __snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics('train' , __UpperCAmelCase ) trainer.save_metrics('train' , __UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : Dict = trainer.evaluate() __snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase ) __snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics('eval' , __UpperCAmelCase ) trainer.save_metrics('eval' , __UpperCAmelCase ) __snake_case : List[Any] = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) def UpperCAmelCase__( __UpperCAmelCase : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
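The one structurally tricky part of the SWAG script above is the multiple-choice collation: every example carries four candidate endings, so the (question, ending) pairs are flattened to a [batch * 4, seq_len] tensor for the tokenizer and model, then folded back to [batch, 4, seq_len], and the loss is a 4-way softmax over choices. A minimal sketch of that reshape round-trip (shapes only, no tokenizer; all values are made up):

import torch

batch_size, num_choices, seq_len = 2, 4, 8
# Pretend these are token ids for batch * num_choices flattened sequences.
flat_input_ids = torch.arange(batch_size * num_choices * seq_len).view(
    batch_size * num_choices, seq_len
)

# Un-flatten back to one row per example, one slot per candidate ending.
input_ids = flat_input_ids.view(batch_size, num_choices, -1)
print(input_ids.shape)  # torch.Size([2, 4, 8])

# The multiple-choice head scores each (example, choice) pair; the loss is
# an ordinary cross-entropy over the num_choices scores.
logits = torch.randn(batch_size, num_choices)
labels = torch.tensor([1, 3])
loss = torch.nn.functional.cross_entropy(logits, labels)
print(loss.item())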
679
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
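Both `__init__` files in this dump follow the same pattern: declare an `_import_structure` mapping and let `_LazyModule` defer the heavy imports until an attribute is first accessed. Outside of transformers, the same effect can be had with a plain PEP 562 module-level `__getattr__`. A generic sketch, where `heavy_submodule`, `ExpensiveClass`, and `expensive_function` are hypothetical names and the file is assumed to live in a package that actually contains that submodule:

# lazy_pkg/__init__.py -- generic lazy re-export without _LazyModule
import importlib

_import_structure = {
    'heavy_submodule': ['ExpensiveClass', 'expensive_function'],
}
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}


def __getattr__(name):
    # Called only when `name` is not found in the module normally (PEP 562).
    if name in _attr_to_module:
        module = importlib.import_module(f'.{_attr_to_module[name]}', __name__)
        return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')


def __dir__():
    return sorted(list(globals()) + list(_attr_to_module))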
679
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


__magic_name__ = logging.get_logger(__name__)

__magic_name__ = '''▁'''

__magic_name__ = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}

__magic_name__ = {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}

__magic_name__ = {
    '''facebook/s2t-small-librispeech-asr''': 1_024,
}

__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

__magic_name__ = {'''mustc''': MUSTC_LANGS}


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = MAX_MODEL_INPUT_SIZES
    __UpperCAmelCase = ["input_ids", "attention_mask"]
    __UpperCAmelCase = []

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="<s>", _UpperCAmelCase="</s>", _UpperCAmelCase="<pad>", _UpperCAmelCase="<unk>", _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, **_UpperCAmelCase):
        __snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_UpperCAmelCase,
            eos_token=_UpperCAmelCase,
            unk_token=_UpperCAmelCase,
            pad_token=_UpperCAmelCase,
            do_upper_case=_UpperCAmelCase,
            do_lower_case=_UpperCAmelCase,
            tgt_lang=_UpperCAmelCase,
            lang_codes=_UpperCAmelCase,
            sp_model_kwargs=self.sp_model_kwargs,
            **_UpperCAmelCase,
        )
        __snake_case : Dict = do_upper_case
        __snake_case : Optional[Any] = do_lower_case
        __snake_case : List[Any] = load_json(_UpperCAmelCase)
        __snake_case : Dict = {v: k for k, v in self.encoder.items()}
        __snake_case : Optional[Any] = spm_file
        __snake_case : Any = load_spm(_UpperCAmelCase, self.sp_model_kwargs)
        if lang_codes is not None:
            __snake_case : Optional[Any] = lang_codes
            __snake_case : int = LANGUAGES[lang_codes]
            __snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
            __snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""") for lang in self.langs}
            __snake_case : Dict = self.lang_tokens
            __snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            __snake_case : Optional[int] = {}

    @property
    def lowercase_(self):
        return len(self.encoder)

    @property
    def lowercase_(self):
        return self._tgt_lang

    @tgt_lang.setter
    def lowercase_(self, _UpperCAmelCase):
        __snake_case : str = new_tgt_lang
        self.set_tgt_lang_special_tokens(_UpperCAmelCase)

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : Tuple = self.lang_code_to_id[tgt_lang]
        __snake_case : Optional[Any] = [lang_code_id]

    def lowercase_(self, _UpperCAmelCase):
        return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase)

    def lowercase_(self, _UpperCAmelCase):
        return self.encoder.get(_UpperCAmelCase, self.encoder[self.unk_token])

    def lowercase_(self, _UpperCAmelCase):
        return self.decoder.get(_UpperCAmelCase, self.unk_token)

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : str = []
        __snake_case : Any = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                __snake_case : Dict = self.sp_model.decode(_UpperCAmelCase)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                __snake_case : Any = []
            else:
                current_sub_tokens.append(_UpperCAmelCase)
        __snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=None):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase
            )
        __snake_case : Union[str, Any] = [1] * len(self.prefix_tokens)
        __snake_case : Optional[Any] = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_UpperCAmelCase)) + suffix_ones
        return prefix_ones + ([0] * len(_UpperCAmelCase)) + ([0] * len(_UpperCAmelCase)) + suffix_ones

    def lowercase_(self):
        __snake_case : List[Any] = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        __snake_case : int = self.__dict__.copy()
        __snake_case : str = None
        return state

    def __setstate__(self, _UpperCAmelCase):
        __snake_case : List[Any] = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            __snake_case : Optional[int] = {}
        __snake_case : int = load_spm(self.spm_file, self.sp_model_kwargs)

    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=None):
        __snake_case : str = Path(_UpperCAmelCase)
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        __snake_case : int = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        __snake_case : Union[str, Any] = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder, _UpperCAmelCase)
        if os.path.abspath(self.spm_file) != os.path.abspath(_UpperCAmelCase) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, _UpperCAmelCase)
        elif not os.path.isfile(self.spm_file):
            with open(_UpperCAmelCase, 'wb') as fi:
                __snake_case : List[str] = self.sp_model.serialized_model_proto()
                fi.write(_UpperCAmelCase)
        return (str(_UpperCAmelCase), str(_UpperCAmelCase))


def UpperCAmelCase__(__UpperCAmelCase: str, __UpperCAmelCase: Dict[str, Any]):
    __snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase)
    spm.Load(str(__UpperCAmelCase))
    return spm


def UpperCAmelCase__(__UpperCAmelCase: str):
    with open(__UpperCAmelCase, 'r') as f:
        return json.load(__UpperCAmelCase)


def UpperCAmelCase__(__UpperCAmelCase: List[Any], __UpperCAmelCase: str):
    with open(__UpperCAmelCase, 'w') as f:
        json.dump(__UpperCAmelCase, __UpperCAmelCase, indent=2)
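# Illustrative usage sketch: assuming the class above is the Speech2Text
# tokenizer shipped in `transformers` (the checkpoint name in the pretrained
# map suggests so), loading it and round-tripping a transcript would look
# roughly like this:
#
#     from transformers import Speech2TextTokenizer
#
#     tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#     ids = tok("a quick test").input_ids
#     text = tok.decode(ids, skip_special_tokens=True)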
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase=7, _UpperCAmelCase=3, _UpperCAmelCase=18, _UpperCAmelCase=30, _UpperCAmelCase=400, _UpperCAmelCase=True, _UpperCAmelCase=None, _UpperCAmelCase=True, _UpperCAmelCase=None):
        __snake_case : List[Any] = size if size is not None else {'shortest_edge': 20}
        __snake_case : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        __snake_case : int = parent
        __snake_case : Optional[Any] = batch_size
        __snake_case : Optional[int] = num_channels
        __snake_case : Tuple = image_size
        __snake_case : List[str] = min_resolution
        __snake_case : Dict = max_resolution
        __snake_case : List[str] = do_resize
        __snake_case : List[Any] = size
        __snake_case : Dict = do_center_crop
        __snake_case : int = crop_size

    def lowercase_(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE(UpperCamelCase, unittest.TestCase):
    """simple docstring"""

    __UpperCAmelCase = MobileNetVaImageProcessor if is_vision_available() else None

    def lowercase_(self):
        __snake_case : Union[str, Any] = MobileNetVaImageProcessingTester(self)

    @property
    def lowercase_(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase_(self):
        __snake_case : Tuple = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(_UpperCAmelCase, 'do_resize'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'size'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'do_center_crop'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'crop_size'))

    def lowercase_(self):
        __snake_case : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        __snake_case : Tuple = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def lowercase_(self):
        pass

    def lowercase_(self):
        # Initialize image_processing
        __snake_case : Any = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase, Image.Image)

        # Test not batched input
        __snake_case : int = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

        # Test batched
        __snake_case : Union[str, Any] = image_processing(_UpperCAmelCase, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

    def lowercase_(self):
        # Initialize image_processing
        __snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, numpify=_UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase, np.ndarray)

        # Test not batched input
        __snake_case : List[str] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

        # Test batched
        __snake_case : int = image_processing(_UpperCAmelCase, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

    def lowercase_(self):
        # Initialize image_processing
        __snake_case : str = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __snake_case : int = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, torchify=_UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase, torch.Tensor)

        # Test not batched input
        __snake_case : List[Any] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )

        # Test batched
        __snake_case : str = image_processing(_UpperCAmelCase, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width']),
        )
def UpperCAmelCase__(__UpperCAmelCase: list):
    __snake_case : List[Any] = len(__UpperCAmelCase)
    for _ in range(__UpperCAmelCase):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                __snake_case , __snake_case : int = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    __magic_name__ = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
from itertools import permutations


def UpperCAmelCase__(__UpperCAmelCase: tuple):
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    __snake_case : Any = [7, 11, 13, 17]
    for i, test in enumerate(__UpperCAmelCase):
        if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def UpperCAmelCase__(__UpperCAmelCase: int = 10):
    return sum(
        int(''.join(map(__UpperCAmelCase, __UpperCAmelCase)))
        for num in permutations(range(__UpperCAmelCase))
        if is_substring_divisible(__UpperCAmelCase)
    )


if __name__ == "__main__":
    print(F'''{solution() = }''')
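# Illustrative check: 1406357289 is the classic 0-9 pandigital number with the
# substring-divisibility property tested above, so (assuming the first helper
# keeps its original name `is_substring_divisible`, as the call site in the
# solver suggests) the predicate should accept its digit tuple:
#
#     assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))
#
# e.g. digits 5-7 form 357 = 7 * 51 and digits 8-10 form 289 = 17 ** 2.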
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    __magic_name__ = '''pt'''
elif is_tf_available():
    __magic_name__ = '''tf'''
else:
    __magic_name__ = '''jax'''


class __SCREAMING_SNAKE_CASE(UpperCamelCase, unittest.TestCase):
    """simple docstring"""

    __UpperCAmelCase = PerceiverTokenizer
    __UpperCAmelCase = False

    def lowercase_(self):
        super().setUp()
        __snake_case : str = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def lowercase_(self):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver')

    def lowercase_(self, **_UpperCAmelCase):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **_UpperCAmelCase)

    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=20, _UpperCAmelCase=5):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        __snake_case : List[Any] = []
        for i in range(len(_UpperCAmelCase)):
            try:
                __snake_case : Optional[Any] = tokenizer.decode([i], clean_up_tokenization_spaces=_UpperCAmelCase)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        __snake_case : List[Any] = list(filter(lambda _UpperCAmelCase: re.match(R'^[ a-zA-Z]+$', t[1]), _UpperCAmelCase))
        __snake_case : Dict = list(filter(lambda _UpperCAmelCase: [t[0]] == tokenizer.encode(t[1], add_special_tokens=_UpperCAmelCase), _UpperCAmelCase))
        if max_length is not None and len(_UpperCAmelCase) > max_length:
            __snake_case : List[str] = toks[:max_length]
        if min_length is not None and len(_UpperCAmelCase) < min_length and len(_UpperCAmelCase) > 0:
            while len(_UpperCAmelCase) < min_length:
                __snake_case : Optional[int] = toks + toks
        # toks_str = [t[1] for t in toks]
        __snake_case : List[Any] = [t[0] for t in toks]

        # Ensure consistency
        __snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase, clean_up_tokenization_spaces=_UpperCAmelCase)
        if " " not in output_txt and len(_UpperCAmelCase) > 1:
            __snake_case : List[str] = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=_UpperCAmelCase)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=_UpperCAmelCase)
            )
        if with_prefix_space:
            __snake_case : List[Any] = ' ' + output_txt
        __snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase)
        return output_txt, output_ids

    def lowercase_(self):
        __snake_case : List[Any] = self.perceiver_tokenizer
        __snake_case : Dict = 'Unicode €.'
        __snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase)
        __snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'], _UpperCAmelCase)

        # decoding
        __snake_case : int = tokenizer.decode(_UpperCAmelCase)
        self.assertEqual(_UpperCAmelCase, '[CLS]Unicode €.[SEP]')

        __snake_case : Optional[Any] = tokenizer('e è é ê ë')
        __snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'], _UpperCAmelCase)

        # decoding
        __snake_case : str = tokenizer.decode(_UpperCAmelCase)
        self.assertEqual(_UpperCAmelCase, '[CLS]e è é ê ë[SEP]')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')

    def lowercase_(self):
        __snake_case : Union[str, Any] = self.perceiver_tokenizer
        __snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        __snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        __snake_case : Dict = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase)
        self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase)

        if FRAMEWORK != "jax":
            __snake_case : List[str] = list(batch.input_ids.numpy()[0])
        else:
            __snake_case : List[Any] = list(batch.input_ids.tolist()[0])

        self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def lowercase_(self):
        __snake_case : Dict = self.perceiver_tokenizer
        __snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        __snake_case : str = tokenizer(_UpperCAmelCase, padding=_UpperCAmelCase, return_tensors=_UpperCAmelCase)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', _UpperCAmelCase)
        self.assertIn('attention_mask', _UpperCAmelCase)
        self.assertNotIn('decoder_input_ids', _UpperCAmelCase)
        self.assertNotIn('decoder_attention_mask', _UpperCAmelCase)

    def lowercase_(self):
        __snake_case : List[str] = self.perceiver_tokenizer
        __snake_case : Tuple = [
            'Summary of the text.',
            'Another summary.',
        ]
        __snake_case : int = tokenizer(text_target=_UpperCAmelCase, max_length=32, padding='max_length', truncation=_UpperCAmelCase, return_tensors=_UpperCAmelCase)
        self.assertEqual(32, targets['input_ids'].shape[1])

    def lowercase_(self):
        # safety check on max_len default value so we are sure the test works
        __snake_case : Union[str, Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        __snake_case : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
                __snake_case : Tuple = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase)
                tokenizer.save_pretrained(_UpperCAmelCase)
                __snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase)
                __snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase)
                self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase)
                shutil.rmtree(_UpperCAmelCase)

        __snake_case : Dict = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                __snake_case : Optional[int] = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                __snake_case : Any = tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase)
                tokenizer.save_pretrained(_UpperCAmelCase)
                __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase)
                __snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase, add_special_tokens=_UpperCAmelCase)
                self.assertListEqual(_UpperCAmelCase, _UpperCAmelCase)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(_UpperCAmelCase)

    def lowercase_(self):
        __snake_case : Tuple = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_UpperCAmelCase)
                with open(os.path.join(_UpperCAmelCase, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    __snake_case : Any = json.load(_UpperCAmelCase)
                with open(os.path.join(_UpperCAmelCase, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    __snake_case : List[str] = json.load(_UpperCAmelCase)
                __snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125)]
                __snake_case : Dict = added_tokens_extra_ids + ['an_additional_special_token']
                __snake_case : List[Any] = added_tokens_extra_ids + ['an_additional_special_token']
                with open(os.path.join(_UpperCAmelCase, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(_UpperCAmelCase, _UpperCAmelCase)
                with open(os.path.join(_UpperCAmelCase, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(_UpperCAmelCase, _UpperCAmelCase)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __snake_case : Optional[Any] = tokenizer_class.from_pretrained(_UpperCAmelCase)
                self.assertIn('an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=_UpperCAmelCase)]
                __snake_case : str = tokenizer_class.from_pretrained(_UpperCAmelCase, additional_special_tokens=_UpperCAmelCase)
                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
                )

    def lowercase_(self):
        __snake_case : Tuple = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), '�')

    def lowercase_(self):
        pass

    def lowercase_(self):
        pass

    def lowercase_(self):
        pass

    def lowercase_(self):
        pass

    def lowercase_(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        __snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase, do_lower_case=_UpperCAmelCase)
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}"""):
                __snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                __snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase)
                self.assertIsInstance(_UpperCAmelCase, _UpperCAmelCase)
from ... import PretrainedConfig


__magic_name__ = {
    '''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    __UpperCAmelCase = "nezha"

    def __init__(self, _UpperCAmelCase=21_128, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3_072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=64, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0.1, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=3, _UpperCAmelCase=True, **_UpperCAmelCase):
        super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase)
        __snake_case : str = vocab_size
        __snake_case : Dict = hidden_size
        __snake_case : List[Any] = num_hidden_layers
        __snake_case : Optional[int] = num_attention_heads
        __snake_case : List[str] = hidden_act
        __snake_case : Any = intermediate_size
        __snake_case : Union[str, Any] = hidden_dropout_prob
        __snake_case : str = attention_probs_dropout_prob
        __snake_case : int = max_position_embeddings
        __snake_case : Optional[int] = max_relative_position
        __snake_case : List[Any] = type_vocab_size
        __snake_case : List[Any] = initializer_range
        __snake_case : int = layer_norm_eps
        __snake_case : Optional[int] = classifier_dropout
        __snake_case : str = use_cache
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa


if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class __SCREAMING_SNAKE_CASE:
    """simple docstring"""

    __UpperCAmelCase = 42
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default="Translation", init=UpperCamelCase, repr=UpperCamelCase)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def lowercase_(self):
        from .features import Value

        return {k: Value('string') for k in sorted(self.languages)}


@dataclass
class __SCREAMING_SNAKE_CASE:
    """simple docstring"""

    __UpperCAmelCase = None
    __UpperCAmelCase = None
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default="TranslationVariableLanguages", init=UpperCamelCase, repr=UpperCamelCase)

    def lowercase_(self):
        __snake_case : List[str] = sorted(set(self.languages)) if self.languages else None
        __snake_case : Optional[Any] = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'language': pa.list_(pa.string()), 'translation': pa.list_(pa.string())})

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : Optional[int] = set(self.languages)
        if self.languages and set(_UpperCAmelCase) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase) - lang_set))}) are not in valid set ({", ".join(_UpperCAmelCase)})."""
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        __snake_case : Any = []
        for lang, text in translation_dict.items():
            if isinstance(_UpperCAmelCase, _UpperCAmelCase):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        __snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase))
        return {"language": languages, "translation": translations}

    def lowercase_(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('string')),
            "translation": Sequence(Value('string')),
        }
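# Illustrative usage sketch: assuming the second dataclass above is the
# `TranslationVariableLanguages` feature from `datasets` (its `_type` default
# suggests so), encoding an example sorts the (language, text) pairs by
# language code and splits multi-reference entries, e.g.
#
#     {"en": ["hello", "hi"], "fr": "bonjour"}
#
# would be flattened to
#
#     {"language": ("en", "en", "fr"), "translation": ("hello", "hi", "bonjour")}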
def UpperCAmelCase__(__UpperCAmelCase: int = 10**9):
    __snake_case : int = 1
    __snake_case : Optional[Any] = 2
    __snake_case : List[Any] = 0
    __snake_case : int = 0
    __snake_case : Union[str, Any] = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        __snake_case : Any = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(F'''{solution() = }''')
from __future__ import annotations


__magic_name__ = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def UpperCAmelCase__(__UpperCAmelCase: list[list[int]], __UpperCAmelCase: list[int], __UpperCAmelCase: list[int], __UpperCAmelCase: int, __UpperCAmelCase: list[list[int]]):
    __snake_case : Optional[int] = [[0 for col in range(len(grid[0]))] for row in range(len(__UpperCAmelCase))]  # the reference grid
    __snake_case : List[str] = 1
    __snake_case : str = [[0 for col in range(len(grid[0]))] for row in range(len(__UpperCAmelCase))]  # the action grid

    __snake_case : Dict = init[0]
    __snake_case : List[str] = init[1]
    __snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y]  # cost from starting cell to destination cell
    __snake_case : Any = [[f, g, x, y]]

    __snake_case : List[str] = False  # flag that is set when search is complete
    __snake_case : str = False  # flag set if we can't find expand

    while not found and not resign:
        if len(__UpperCAmelCase) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            __snake_case : List[Any] = cell.pop()
            __snake_case : Optional[int] = next_cell[2]
            __snake_case : int = next_cell[3]
            __snake_case : Optional[Any] = next_cell[1]

            if x == goal[0] and y == goal[1]:
                __snake_case : Union[str, Any] = True
            else:
                for i in range(len(__UpperCAmelCase)):  # to try out different valid actions
                    __snake_case : Tuple = x + DIRECTIONS[i][0]
                    __snake_case : Tuple = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(__UpperCAmelCase) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            __snake_case : List[str] = g + cost
                            __snake_case : Optional[Any] = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            __snake_case : Dict = 1
                            __snake_case : Any = i

    __snake_case : Tuple = []
    __snake_case : Dict = goal[0]
    __snake_case : Optional[int] = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        __snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
        __snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
        __snake_case : Tuple = xa
        __snake_case : List[str] = ya
        invpath.append([x, y])

    __snake_case : Dict = []
    for i in range(len(__UpperCAmelCase)):
        path.append(invpath[len(__UpperCAmelCase) - 1 - i])
    return path, action


if __name__ == "__main__":
    __magic_name__ = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    __magic_name__ = [0, 0]
    # all coordinates are given in format [y,x]
    __magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
    __magic_name__ = 1

    # the cost map which pushes the path closer to the goal
    __magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            __magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                __magic_name__ = 99

    __magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)
    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = "wav2vec2"

    def __init__(
        self,
        _UpperCAmelCase=32,
        _UpperCAmelCase=768,
        _UpperCAmelCase=12,
        _UpperCAmelCase=12,
        _UpperCAmelCase=3_072,
        _UpperCAmelCase="gelu",
        _UpperCAmelCase=0.1,
        _UpperCAmelCase=0.1,
        _UpperCAmelCase=0.1,
        _UpperCAmelCase=0.0,
        _UpperCAmelCase=0.0,
        _UpperCAmelCase=0.1,
        _UpperCAmelCase=0.1,
        _UpperCAmelCase=0.02,
        _UpperCAmelCase=1E-5,
        _UpperCAmelCase="group",
        _UpperCAmelCase="gelu",
        _UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512),
        _UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2),
        _UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2),
        _UpperCAmelCase=False,
        _UpperCAmelCase=128,
        _UpperCAmelCase=16,
        _UpperCAmelCase=False,
        _UpperCAmelCase=True,
        _UpperCAmelCase=0.05,
        _UpperCAmelCase=10,
        _UpperCAmelCase=2,
        _UpperCAmelCase=0.0,
        _UpperCAmelCase=10,
        _UpperCAmelCase=0,
        _UpperCAmelCase=320,
        _UpperCAmelCase=2,
        _UpperCAmelCase=0.1,
        _UpperCAmelCase=100,
        _UpperCAmelCase=256,
        _UpperCAmelCase=256,
        _UpperCAmelCase=0.1,
        _UpperCAmelCase="sum",
        _UpperCAmelCase=False,
        _UpperCAmelCase=False,
        _UpperCAmelCase=256,
        _UpperCAmelCase=(512, 512, 512, 512, 1_500),
        _UpperCAmelCase=(5, 3, 3, 1, 1),
        _UpperCAmelCase=(1, 2, 3, 1, 1),
        _UpperCAmelCase=512,
        _UpperCAmelCase=0,
        _UpperCAmelCase=1,
        _UpperCAmelCase=2,
        _UpperCAmelCase=False,
        _UpperCAmelCase=3,
        _UpperCAmelCase=2,
        _UpperCAmelCase=3,
        _UpperCAmelCase=None,
        _UpperCAmelCase=None,
        **_UpperCAmelCase,
    ):
        super().__init__(**_UpperCAmelCase, pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase)
        __snake_case : List[str] = hidden_size
        __snake_case : int = feat_extract_norm
        __snake_case : str = feat_extract_activation
        __snake_case : Union[str, Any] = list(_UpperCAmelCase)
        __snake_case : Optional[Any] = list(_UpperCAmelCase)
        __snake_case : Dict = list(_UpperCAmelCase)
        __snake_case : Any = conv_bias
        __snake_case : List[Any] = num_conv_pos_embeddings
        __snake_case : Union[str, Any] = num_conv_pos_embedding_groups
        __snake_case : int = len(self.conv_dim)
        __snake_case : Union[str, Any] = num_hidden_layers
        __snake_case : List[str] = intermediate_size
        __snake_case : List[Any] = hidden_act
        __snake_case : Optional[Any] = num_attention_heads
        __snake_case : Tuple = hidden_dropout
        __snake_case : Optional[int] = attention_dropout
        __snake_case : str = activation_dropout
        __snake_case : Optional[int] = feat_proj_dropout
        __snake_case : Optional[int] = final_dropout
        __snake_case : str = layerdrop
        __snake_case : List[Any] = layer_norm_eps
        __snake_case : Dict = initializer_range
        __snake_case : str = vocab_size
        __snake_case : Dict = do_stable_layer_norm
        __snake_case : Any = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel)}`."""
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __snake_case : str = apply_spec_augment
        __snake_case : Tuple = mask_time_prob
        __snake_case : int = mask_time_length
        __snake_case : List[str] = mask_time_min_masks
        __snake_case : List[str] = mask_feature_prob
        __snake_case : Union[str, Any] = mask_feature_length
        __snake_case : Optional[Any] = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        __snake_case : Optional[int] = num_codevectors_per_group
        __snake_case : Any = num_codevector_groups
        __snake_case : str = contrastive_logits_temperature
        __snake_case : List[Any] = feat_quantizer_dropout
        __snake_case : int = num_negatives
        __snake_case : Dict = codevector_dim
        __snake_case : Tuple = proj_codevector_dim
        __snake_case : Optional[Any] = diversity_loss_weight

        # ctc loss
        __snake_case : str = ctc_loss_reduction
        __snake_case : List[str] = ctc_zero_infinity

        # adapter
        __snake_case : Optional[Any] = add_adapter
        __snake_case : Dict = adapter_kernel_size
        __snake_case : List[str] = adapter_stride
        __snake_case : Optional[int] = num_adapter_layers
        __snake_case : Any = output_hidden_size or hidden_size
        __snake_case : Union[str, Any] = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        __snake_case : Optional[Any] = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        __snake_case : Dict = list(_UpperCAmelCase)
        __snake_case : Optional[int] = list(_UpperCAmelCase)
        __snake_case : Optional[Any] = list(_UpperCAmelCase)
        __snake_case : Any = xvector_output_dim

    @property
    def lowercase_(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
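# Illustrative note: the property above multiplies the convolutional strides
# together; with the default `conv_stride=(5, 2, 2, 2, 2, 2, 2)` this gives the
# feature extractor's input-to-frame ratio. A self-contained check:
import functools
import operator

_ratio = functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1)
assert _ratio == 320  # one output frame per 320 waveform samples (20 ms at 16 kHz)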
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = "instructblip_vision_model"

    def __init__(self, _UpperCAmelCase=1_408, _UpperCAmelCase=6_144, _UpperCAmelCase=39, _UpperCAmelCase=16, _UpperCAmelCase=224, _UpperCAmelCase=14, _UpperCAmelCase="gelu", _UpperCAmelCase=1E-6, _UpperCAmelCase=0.0, _UpperCAmelCase=1E-10, _UpperCAmelCase=True, **_UpperCAmelCase):
        super().__init__(**_UpperCAmelCase)
        __snake_case : Optional[Any] = hidden_size
        __snake_case : Any = intermediate_size
        __snake_case : str = num_hidden_layers
        __snake_case : Any = num_attention_heads
        __snake_case : int = patch_size
        __snake_case : Dict = image_size
        __snake_case : Any = initializer_range
        __snake_case : List[Any] = attention_dropout
        __snake_case : Optional[Any] = layer_norm_eps
        __snake_case : Optional[int] = hidden_act
        __snake_case : int = qkv_bias

    @classmethod
    def lowercase_(cls, _UpperCAmelCase, **_UpperCAmelCase):
        cls._set_token_in_kwargs(_UpperCAmelCase)
        __snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            __snake_case : Any = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )
        return cls.from_dict(_UpperCAmelCase, **_UpperCAmelCase)


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = "instructblip_qformer"

    def __init__(self, _UpperCAmelCase=30_522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3_072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase="absolute", _UpperCAmelCase=2, _UpperCAmelCase=1_408, **_UpperCAmelCase):
        super().__init__(pad_token_id=_UpperCAmelCase, **_UpperCAmelCase)
        __snake_case : Union[str, Any] = vocab_size
        __snake_case : List[Any] = hidden_size
        __snake_case : str = num_hidden_layers
        __snake_case : Dict = num_attention_heads
        __snake_case : Optional[Any] = hidden_act
        __snake_case : int = intermediate_size
        __snake_case : str = hidden_dropout_prob
        __snake_case : Optional[Any] = attention_probs_dropout_prob
        __snake_case : Union[str, Any] = max_position_embeddings
        __snake_case : Dict = initializer_range
        __snake_case : Any = layer_norm_eps
        __snake_case : Union[str, Any] = position_embedding_type
        __snake_case : Optional[int] = cross_attention_frequency
        __snake_case : Union[str, Any] = encoder_hidden_size

    @classmethod
    def lowercase_(cls, _UpperCAmelCase, **_UpperCAmelCase):
        cls._set_token_in_kwargs(_UpperCAmelCase)
        __snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase, **_UpperCAmelCase)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == "instructblip":
            __snake_case : List[Any] = config_dict['qformer_config']
        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors."""
            )
        return cls.from_dict(_UpperCAmelCase, **_UpperCAmelCase)


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = "instructblip"
    __UpperCAmelCase = True

    def __init__(self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=32, **_UpperCAmelCase):
        super().__init__(**_UpperCAmelCase)
        if vision_config is None:
            __snake_case : List[str] = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            __snake_case : Union[str, Any] = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            __snake_case : str = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        __snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase)
        __snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase)
        __snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt'
        __snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase)
        __snake_case : List[Any] = self.text_config.tie_word_embeddings
        __snake_case : Optional[int] = self.text_config.is_encoder_decoder
        __snake_case : List[str] = num_query_tokens
        __snake_case : Tuple = self.vision_config.hidden_size
        __snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        __snake_case : str = 1.0
        __snake_case : Optional[int] = 0.02

    @classmethod
    def lowercase_(cls, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **_UpperCAmelCase,
        )

    def lowercase_(self):
        __snake_case : Tuple = copy.deepcopy(self.__dict__)
        __snake_case : Tuple = self.vision_config.to_dict()
        __snake_case : List[Any] = self.qformer_config.to_dict()
        __snake_case : Optional[int] = self.text_config.to_dict()
        __snake_case : List[str] = self.__class__.model_type
        return output
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path


__magic_name__ = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__magic_name__ = [ord(letter) for letter in string.ascii_lowercase]
__magic_name__ = {ord(char) for char in VALID_CHARS}

__magic_name__ = ["the", "be", "to", "of", "and", "in", "that", "have"]


def UpperCAmelCase__(__UpperCAmelCase: list[int], __UpperCAmelCase: tuple[int, ...]):
    __snake_case : str = ""
    __snake_case : int
    __snake_case : int
    __snake_case : int

    for keychar, cipherchar in zip(cycle(__UpperCAmelCase), __UpperCAmelCase):
        __snake_case : List[str] = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(__UpperCAmelCase)
    return decoded


def UpperCAmelCase__(__UpperCAmelCase: list[int]):
    __snake_case : list[str] = []
    for key in product(__UpperCAmelCase, repeat=3):
        __snake_case : List[str] = try_key(__UpperCAmelCase, __UpperCAmelCase)
        if encoded is not None:
            possibles.append(__UpperCAmelCase)
    return possibles


def UpperCAmelCase__(__UpperCAmelCase: list[str], __UpperCAmelCase: str):
    return [possible for possible in possibles if common_word in possible.lower()]


def UpperCAmelCase__(__UpperCAmelCase: str = "p059_cipher.txt"):
    __snake_case : list[int]
    __snake_case : list[str]
    __snake_case : str
    __snake_case : str
    __snake_case : str = Path(__UpperCAmelCase).parent.joinpath(__UpperCAmelCase).read_text(encoding='utf-8')

    __snake_case : Tuple = [int(__UpperCAmelCase) for number in data.strip().split(',')]

    __snake_case : List[Any] = filter_valid_chars(__UpperCAmelCase)
    for common_word in COMMON_WORDS:
        __snake_case : Tuple = filter_common_word(__UpperCAmelCase, __UpperCAmelCase)
        if len(__UpperCAmelCase) == 1:
            break

    __snake_case : int = possibles[0]
    return sum(ord(__UpperCAmelCase) for char in decoded_text)


if __name__ == "__main__":
    print(F'''{solution() = }''')
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor


__magic_name__ = logging.get_logger(__name__)


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    def __init__(self, *_UpperCAmelCase, **_UpperCAmelCase):
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            _UpperCAmelCase,
        )
        super().__init__(*_UpperCAmelCase, **_UpperCAmelCase)
import numpy as np


def UpperCAmelCase__(__UpperCAmelCase: np.ndarray):
    return 1 / (1 + np.exp(-vector))


def UpperCAmelCase__(__UpperCAmelCase: np.ndarray):
    return vector * sigmoid(__UpperCAmelCase)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
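# Illustrative check: a quick numeric sanity test of the two activations above,
# assuming the first is the sigmoid and the second the sigmoid-weighted linear
# unit (swish), as their bodies suggest. Self-contained:
import numpy as np

_v = np.array([-10.0, 0.0, 10.0])
_sig = 1 / (1 + np.exp(-_v))  # ~ [4.5e-05, 0.5, 1.0]
_swish = _v * _sig            # ~ [-4.5e-04, 0.0, 10.0]; swish(0) == 0, swish(x) -> x for large x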
import math
import os
import sys


def UpperCAmelCase__(__UpperCAmelCase: str):
    __snake_case : Union[str, Any] = ''
    try:
        with open(__UpperCAmelCase, 'rb') as binary_file:
            __snake_case : Optional[Any] = binary_file.read()
        for dat in data:
            __snake_case : Tuple = F"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print('File not accessible')
        sys.exit()


def UpperCAmelCase__(__UpperCAmelCase: dict[str, str], __UpperCAmelCase: str, __UpperCAmelCase: int, __UpperCAmelCase: str):
    lexicon.pop(__UpperCAmelCase)
    __snake_case : Union[str, Any] = last_match_id
    if math.loga(__UpperCAmelCase).is_integer():
        for curr_key in lexicon:
            __snake_case : Tuple = '0' + lexicon[curr_key]
    __snake_case : Any = bin(__UpperCAmelCase)[2:]


def UpperCAmelCase__(__UpperCAmelCase: str):
    __snake_case : Tuple = {'0': '0', '1': '1'}
    __snake_case , __snake_case : Optional[int] = '', ''
    __snake_case : str = len(__UpperCAmelCase)
    for i in range(len(__UpperCAmelCase)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        __snake_case : Optional[int] = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase)
        index += 1
        __snake_case : Union[str, Any] = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        __snake_case : Any = lexicon[curr_string]
        result += last_match_id
    return result


def UpperCAmelCase__(__UpperCAmelCase: str, __UpperCAmelCase: str):
    __snake_case : str = os.path.getsize(__UpperCAmelCase)
    __snake_case : List[Any] = bin(__UpperCAmelCase)[2:]
    __snake_case : Any = len(__UpperCAmelCase)
    return "0" * (length_length - 1) + file_length_binary + compressed


def UpperCAmelCase__(__UpperCAmelCase: str, __UpperCAmelCase: str):
    __snake_case : Tuple = 8
    try:
        with open(__UpperCAmelCase, 'wb') as opened_file:
            __snake_case : int = [
                to_write[i : i + byte_length] for i in range(0, len(__UpperCAmelCase), __UpperCAmelCase)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append('10000000')
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)
            for elem in result_byte_array:
                opened_file.write(int(__UpperCAmelCase, 2).to_bytes(1, byteorder='big'))
    except OSError:
        print('File not accessible')
        sys.exit()


def UpperCAmelCase__(__UpperCAmelCase: str, __UpperCAmelCase: str):
    __snake_case : str = read_file_binary(__UpperCAmelCase)
    __snake_case : Tuple = compress_data(__UpperCAmelCase)
    __snake_case : int = add_file_length(__UpperCAmelCase, __UpperCAmelCase)
    write_file_binary(__UpperCAmelCase, __UpperCAmelCase)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
from typing import Optional

import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import (
    BackboneOutput,
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPoolingAndNoAttention,
    ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig


__magic_name__ = logging.get_logger(__name__)

# General docstring
__magic_name__ = '''ResNetConfig'''

# Base docstring
__magic_name__ = '''microsoft/resnet-50'''
__magic_name__ = [1, 2_048, 7, 7]

# Image classification docstring
__magic_name__ = '''microsoft/resnet-50'''
__magic_name__ = '''tiger cat'''

__magic_name__ = [
    '''microsoft/resnet-50''',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=3, _UpperCAmelCase=1, _UpperCAmelCase="relu"):
        super().__init__()
        __snake_case : Tuple = nn.Convad(_UpperCAmelCase, _UpperCAmelCase, kernel_size=_UpperCAmelCase, stride=_UpperCAmelCase, padding=kernel_size // 2, bias=_UpperCAmelCase)
        __snake_case : Tuple = nn.BatchNormad(_UpperCAmelCase)
        __snake_case : List[str] = ACTaFN[activation] if activation is not None else nn.Identity()

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : Union[str, Any] = self.convolution(_UpperCAmelCase)
        __snake_case : Any = self.normalization(_UpperCAmelCase)
        __snake_case : Union[str, Any] = self.activation(_UpperCAmelCase)
        return hidden_state


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase):
        super().__init__()
        __snake_case : Dict = ResNetConvLayer(config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act)
        __snake_case : Union[str, Any] = nn.MaxPoolad(kernel_size=3, stride=2, padding=1)
        __snake_case : Optional[int] = config.num_channels

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : str = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError('Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        __snake_case : int = self.embedder(_UpperCAmelCase)
        __snake_case : Optional[int] = self.pooler(_UpperCAmelCase)
        return embedding


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=2):
        super().__init__()
        __snake_case : Union[str, Any] = nn.Convad(_UpperCAmelCase, _UpperCAmelCase, kernel_size=1, stride=_UpperCAmelCase, bias=_UpperCAmelCase)
        __snake_case : List[Any] = nn.BatchNormad(_UpperCAmelCase)

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : Tuple = self.convolution(_UpperCAmelCase)
        __snake_case : int = self.normalization(_UpperCAmelCase)
        return hidden_state


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=1, _UpperCAmelCase="relu"):
        super().__init__()
        __snake_case : List[Any] = in_channels != out_channels or stride != 1
        __snake_case : Any = ResNetShortCut(_UpperCAmelCase, _UpperCAmelCase, stride=_UpperCAmelCase) if should_apply_shortcut else nn.Identity()
        __snake_case : Optional[int] = nn.Sequential(
            ResNetConvLayer(_UpperCAmelCase, _UpperCAmelCase, stride=_UpperCAmelCase),
            ResNetConvLayer(_UpperCAmelCase, _UpperCAmelCase, activation=_UpperCAmelCase),
        )
        __snake_case : List[str] = ACTaFN[activation]

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : List[Any] = hidden_state
        __snake_case : Optional[Any] = self.layer(_UpperCAmelCase)
        __snake_case : Optional[Any] = self.shortcut(_UpperCAmelCase)
        hidden_state += residual
        __snake_case : Optional[Any] = self.activation(_UpperCAmelCase)
        return hidden_state


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=1, _UpperCAmelCase="relu", _UpperCAmelCase=4):
        super().__init__()
        __snake_case : Union[str, Any] = in_channels != out_channels or stride != 1
        __snake_case : int = out_channels // reduction
        __snake_case : List[Any] = ResNetShortCut(_UpperCAmelCase, _UpperCAmelCase, stride=_UpperCAmelCase) if should_apply_shortcut else nn.Identity()
        __snake_case : str = nn.Sequential(
            ResNetConvLayer(_UpperCAmelCase, _UpperCAmelCase, kernel_size=1),
            ResNetConvLayer(_UpperCAmelCase, _UpperCAmelCase, stride=_UpperCAmelCase),
            ResNetConvLayer(_UpperCAmelCase, _UpperCAmelCase, kernel_size=1, activation=_UpperCAmelCase),
        )
        __snake_case : Union[str, Any] = ACTaFN[activation]

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : Dict = hidden_state
        __snake_case : int = self.layer(_UpperCAmelCase)
        __snake_case : List[str] = self.shortcut(_UpperCAmelCase)
        hidden_state += residual
        __snake_case : List[str] = self.activation(_UpperCAmelCase)
        return hidden_state


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=2, _UpperCAmelCase=2):
        super().__init__()
        __snake_case : Union[str, Any] = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        __snake_case : Optional[int] = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(_UpperCAmelCase, _UpperCAmelCase, stride=_UpperCAmelCase, activation=config.hidden_act),
            *[layer(_UpperCAmelCase, _UpperCAmelCase, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def lowercase_(self, _UpperCAmelCase):
        __snake_case : Tuple = input
        for layer in self.layers:
            __snake_case : int = layer(_UpperCAmelCase)
        return hidden_state


class __SCREAMING_SNAKE_CASE(nn.Module):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase):
        super().__init__()
        __snake_case : int = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                _UpperCAmelCase,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        __snake_case : List[Any] = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(_UpperCAmelCase, config.depths[1:]):
            self.stages.append(ResNetStage(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, depth=_UpperCAmelCase))

    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=True):
        __snake_case : int = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                __snake_case : List[str] = hidden_states + (hidden_state,)
            __snake_case : Any = stage_module(_UpperCAmelCase)
        if output_hidden_states:
            __snake_case : Any = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase, hidden_states=_UpperCAmelCase)


class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    __UpperCAmelCase = ResNetConfig
    __UpperCAmelCase = "resnet"
    __UpperCAmelCase = "pixel_values"
    __UpperCAmelCase = True

    def lowercase_(self, _UpperCAmelCase):
        if isinstance(_UpperCAmelCase, nn.Convad):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(_UpperCAmelCase, (nn.BatchNormad, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=False):
        if isinstance(_UpperCAmelCase, _UpperCAmelCase):
            __snake_case : str = value


__magic_name__ = r'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a
            config file does not load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

__magic_name__ = r'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    UpperCamelCase,
)
class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase):
        super().__init__(_UpperCAmelCase)
        __snake_case : List[str] = config
        __snake_case : List[Any] = ResNetEmbeddings(_UpperCAmelCase)
        __snake_case : Optional[int] = ResNetEncoder(_UpperCAmelCase)
        __snake_case : List[Any] = nn.AdaptiveAvgPoolad((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_UpperCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=_UpperCAmelCase,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None):
        __snake_case : Optional[int] = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        __snake_case : int = return_dict if return_dict is not None else self.config.use_return_dict
        __snake_case : List[str] = self.embedder(_UpperCAmelCase)
        __snake_case : Optional[Any] = self.encoder(_UpperCAmelCase, output_hidden_states=_UpperCAmelCase, return_dict=_UpperCAmelCase)
        __snake_case : Optional[int] = encoder_outputs[0]
        __snake_case : str = self.pooler(_UpperCAmelCase)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=_UpperCAmelCase,
            pooler_output=_UpperCAmelCase,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    "\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ",
    UpperCamelCase,
)
class __SCREAMING_SNAKE_CASE(UpperCamelCase):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase):
        super().__init__(_UpperCAmelCase)
        __snake_case : List[str] = config.num_labels
        __snake_case : List[str] = ResNetModel(_UpperCAmelCase)
        # classification head
        __snake_case : List[Any] = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_UpperCAmelCase)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=_UpperCAmelCase,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def lowercase_(self, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=None):
        __snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
        __snake_case : Optional[Any] = self.resnet(_UpperCAmelCase, output_hidden_states=_UpperCAmelCase, return_dict=_UpperCAmelCase)
        __snake_case : str = outputs.pooler_output if return_dict else outputs[1]
        __snake_case : Union[str, Any] = self.classifier(_UpperCAmelCase)
        __snake_case : List[str] = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __snake_case : Optional[Any] = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __snake_case : Optional[int] = 'single_label_classification'
                else:
                    __snake_case : List[Any] = 'multi_label_classification'
            if self.config.problem_type == "regression":
                __snake_case : int = MSELoss()
                if self.num_labels == 1:
                    __snake_case : Optional[Any] = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    __snake_case : List[str] = loss_fct(_UpperCAmelCase, _UpperCAmelCase)
            elif self.config.problem_type == "single_label_classification":
                __snake_case : str = CrossEntropyLoss()
                __snake_case : List[Any] = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __snake_case : Any = BCEWithLogitsLoss()
                __snake_case : Dict = loss_fct(_UpperCAmelCase, _UpperCAmelCase)
        if not return_dict:
            __snake_case : List[Any] = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase, logits=_UpperCAmelCase, hidden_states=outputs.hidden_states)


@add_start_docstrings(
    "\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ",
    UpperCamelCase,
)
class __SCREAMING_SNAKE_CASE(UpperCamelCase, UpperCamelCase):
    """simple docstring"""

    def __init__(self, _UpperCAmelCase):
        super().__init__(_UpperCAmelCase)
        super()._init_backbone(_UpperCAmelCase)
        __snake_case : List[str] = [config.embedding_size] + config.hidden_sizes
        __snake_case : Optional[Any] = ResNetEmbeddings(_UpperCAmelCase)
        __snake_case : List[str] = ResNetEncoder(_UpperCAmelCase)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(_UpperCAmelCase)
    @replace_return_docstrings(output_type=_UpperCAmelCase, config_class=_CONFIG_FOR_DOC)
    def lowercase_(self, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None):
        __snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
        __snake_case : int = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        __snake_case : List[Any] = self.embedder(_UpperCAmelCase)
        __snake_case : str = self.encoder(_UpperCAmelCase, output_hidden_states=_UpperCAmelCase, return_dict=_UpperCAmelCase)
        __snake_case : Tuple = outputs.hidden_states
        __snake_case : Optional[int] = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            __snake_case : int = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=_UpperCAmelCase,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=_UpperCAmelCase,
        )
679
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum of all 0-to-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
679
1
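The style_context in the row above is Project Euler 43's substring-divisibility filter. A self-contained sanity check (a minimal sketch, not part of the dataset row) against 1406357289, the example pandigital number from the problem statement:

# Each 3-digit window d2d3d4 ... d8d9d10 of 1406357289 must be divisible by
# 2, 3, 5, 7, 11, 13, 17 respectively.
digits = (1, 4, 0, 6, 3, 5, 7, 2, 8, 9)
primes = [2, 3, 5, 7, 11, 13, 17]
windows = [digits[i + 1] * 100 + digits[i + 2] * 10 + digits[i + 3] for i in range(7)]
assert all(w % p == 0 for w, p in zip(windows, primes))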
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') __magic_name__ = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __UpperCAmelCase = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"}) __UpperCAmelCase = field( default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __UpperCAmelCase = field( default=UpperCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def lowercase_ ( self ): if self.train_file is not None: __snake_case : Union[str, Any] = self.train_file.split('.' 
)[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __snake_case : List[str] = self.validation_file.split('.' )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class __SCREAMING_SNAKE_CASE : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = True __UpperCAmelCase = None __UpperCAmelCase = None def __call__( self , _UpperCAmelCase ): __snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels' __snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features] __snake_case : List[Any] = len(_UpperCAmelCase ) __snake_case : Union[str, Any] = len(features[0]['input_ids'] ) __snake_case : Union[str, Any] = [ [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features ] __snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) ) __snake_case : Optional[Any] = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten __snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()} # Add back labels __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) return batch def UpperCAmelCase__( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __snake_case : Tuple = training_args.get_process_log_level() logger.setLevel(__UpperCAmelCase ) datasets.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.set_verbosity(__UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
__snake_case : Dict = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case : str = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __snake_case : Optional[int] = {} if data_args.train_file is not None: __snake_case : Optional[int] = data_args.train_file if data_args.validation_file is not None: __snake_case : int = data_args.validation_file __snake_case : int = data_args.train_file.split('.' )[-1] __snake_case : Tuple = load_dataset( __UpperCAmelCase , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __snake_case : Optional[int] = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __snake_case : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
__snake_case : str = [F"""ending{i}""" for i in range(4 )] __snake_case : Optional[Any] = 'sent1' __snake_case : Tuple = 'sent2' if data_args.max_seq_length is None: __snake_case : List[Any] = tokenizer.model_max_length if max_seq_length > 10_24: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.' ) __snake_case : List[Any] = 10_24 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) __snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(__UpperCAmelCase : Tuple ): __snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]] __snake_case : Union[str, Any] = examples[question_header_name] __snake_case : Optional[int] = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase ) ] # Flatten out __snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) ) __snake_case : int = list(chain(*__UpperCAmelCase ) ) # Tokenize __snake_case : Tuple = tokenizer( __UpperCAmelCase , __UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) __snake_case : Optional[Any] = raw_datasets['train'] if data_args.max_train_samples is not None: __snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples ) __snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): __snake_case : int = train_dataset.map( __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) __snake_case : Optional[Any] = raw_datasets['validation'] if data_args.max_eval_samples is not None: __snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples ) __snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): __snake_case : List[Any] = eval_dataset.map( __UpperCAmelCase , batched=__UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __snake_case : str = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(__UpperCAmelCase : int ): __snake_case , __snake_case : Union[str, Any] = eval_predictions __snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our 
Trainer __snake_case : List[str] = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , ) # Training if training_args.do_train: __snake_case : Dict = None if training_args.resume_from_checkpoint is not None: __snake_case : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: __snake_case : List[str] = last_checkpoint __snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase ) trainer.save_model() # Saves the tokenizer too for easy upload __snake_case : List[Any] = train_result.metrics __snake_case : Optional[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase ) ) __snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics('train' , __UpperCAmelCase ) trainer.save_metrics('train' , __UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case : Dict = trainer.evaluate() __snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase ) __snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) ) trainer.log_metrics('eval' , __UpperCAmelCase ) trainer.save_metrics('eval' , __UpperCAmelCase ) __snake_case : List[Any] = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**__UpperCAmelCase ) else: trainer.create_model_card(**__UpperCAmelCase ) def UpperCAmelCase__( __UpperCAmelCase : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
679
# Functions to print a diamond (pyramid) of stars


def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a notice for non-positive input."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
679
1
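The SWAG training script in the row above flattens each example's four candidate endings into one tokenizer call, and its collator reshapes the padded batch back to (batch_size, num_choices, seq_len). A minimal sketch of that un-flatten step (toy tensors, not the script's own data):

import torch

batch_size, num_choices, seq_len = 2, 4, 5
# 2 questions x 4 endings arrive as 8 flat, padded sequences...
flat_input_ids = torch.arange(batch_size * num_choices * seq_len).reshape(-1, seq_len)
# ...and are viewed back into per-example groups of choices.
unflattened = flat_input_ids.view(batch_size, num_choices, -1)
assert unflattened.shape == (batch_size, num_choices, seq_len)
# Choice j of example i is the contiguous flat row i * num_choices + j.
assert torch.equal(unflattened[1, 2], flat_input_ids[1 * num_choices + 2])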
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __magic_name__ = logging.get_logger(__name__) __magic_name__ = { '''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = "instructblip_vision_model" def __init__( self , _UpperCAmelCase=1_408 , _UpperCAmelCase=6_144 , _UpperCAmelCase=39 , _UpperCAmelCase=16 , _UpperCAmelCase=224 , _UpperCAmelCase=14 , _UpperCAmelCase="gelu" , _UpperCAmelCase=1E-6 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1E-10 , _UpperCAmelCase=True , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) __snake_case : Optional[Any] = hidden_size __snake_case : Any = intermediate_size __snake_case : str = num_hidden_layers __snake_case : Any = num_attention_heads __snake_case : int = patch_size __snake_case : Dict = image_size __snake_case : Any = initializer_range __snake_case : List[Any] = attention_dropout __snake_case : Optional[Any] = layer_norm_eps __snake_case : Optional[int] = hidden_act __snake_case : int = qkv_bias @classmethod def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ): cls._set_token_in_kwargs(_UpperCAmelCase ) __snake_case , __snake_case : str = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __snake_case : Any = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = "instructblip_qformer" def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=0 , _UpperCAmelCase="absolute" , _UpperCAmelCase=2 , _UpperCAmelCase=1_408 , **_UpperCAmelCase , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) __snake_case : Union[str, Any] = vocab_size __snake_case : List[Any] = hidden_size __snake_case : str = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Optional[Any] = hidden_act __snake_case : int = intermediate_size __snake_case : str = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Dict = initializer_range __snake_case : Any = layer_norm_eps __snake_case : Union[str, Any] = position_embedding_type __snake_case : Optional[int] = cross_attention_frequency __snake_case : Union[str, Any] = encoder_hidden_size @classmethod def lowercase_ ( cls , _UpperCAmelCase , **_UpperCAmelCase ): cls._set_token_in_kwargs(_UpperCAmelCase ) __snake_case , __snake_case : Optional[int] = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get('model_type' ) == "instructblip": __snake_case : List[Any] = config_dict['qformer_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = "instructblip" __UpperCAmelCase = True def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=32 , **_UpperCAmelCase ): super().__init__(**_UpperCAmelCase ) if vision_config is None: __snake_case : List[str] = {} logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.' ) if qformer_config is None: __snake_case : Union[str, Any] = {} logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.' ) if text_config is None: __snake_case : str = {} logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' 
) __snake_case : Optional[Any] = InstructBlipVisionConfig(**_UpperCAmelCase ) __snake_case : Tuple = InstructBlipQFormerConfig(**_UpperCAmelCase ) __snake_case : List[Any] = text_config['model_type'] if 'model_type' in text_config else 'opt' __snake_case : str = CONFIG_MAPPING[text_model_type](**_UpperCAmelCase ) __snake_case : List[Any] = self.text_config.tie_word_embeddings __snake_case : Optional[int] = self.text_config.is_encoder_decoder __snake_case : List[str] = num_query_tokens __snake_case : Tuple = self.vision_config.hidden_size __snake_case : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES __snake_case : str = 1.0 __snake_case : Optional[int] = 0.02 @classmethod def lowercase_ ( cls , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_UpperCAmelCase , ) def lowercase_ ( self ): __snake_case : Tuple = copy.deepcopy(self.__dict__ ) __snake_case : Tuple = self.vision_config.to_dict() __snake_case : List[Any] = self.qformer_config.to_dict() __snake_case : Optional[int] = self.text_config.to_dict() __snake_case : List[str] = self.__class__.model_type return output
679
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the set bits of a non-negative integer with Brian Kernighan's algorithm."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the set bits of a non-negative integer with the modulo operator."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two bit-counting implementations."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
679
1
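Brian Kernighan's trick in the row above relies on `n &= n - 1` clearing exactly one set bit per iteration, so the loop runs once per set bit rather than once per bit. A self-contained check against Python's own binary representation:

def popcount_kernighan(n: int) -> int:
    count = 0
    while n:
        n &= n - 1  # clears the lowest set bit
        count += 1
    return count


for value in (0, 1, 0b1011, 255, 2**20 + 1):
    assert popcount_kernighan(value) == bin(value).count("1")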
import re
from pathlib import Path
from unittest import TestCase

import pytest


@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
679
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def UpperCAmelCase__( __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=False ): try: __snake_case : Optional[int] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __snake_case : Union[str, Any] = default else: # KEY is set, convert it to True or False. try: __snake_case : Optional[Any] = strtobool(__UpperCAmelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"""If set, {key} must be yes or no.""" ) return _value __magic_name__ = parse_flag_from_env('''RUN_SLOW''', default=False) __magic_name__ = parse_flag_from_env('''RUN_REMOTE''', default=False) __magic_name__ = parse_flag_from_env('''RUN_LOCAL''', default=True) __magic_name__ = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression __magic_name__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') __magic_name__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') __magic_name__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio __magic_name__ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam __magic_name__ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility __magic_name__ = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows __magic_name__ = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def UpperCAmelCase__( __UpperCAmelCase : Any ): try: import faiss # noqa except ImportError: __snake_case : Dict = unittest.skip('test requires faiss' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : List[str] ): try: import regex # noqa except ImportError: __snake_case : List[str] = unittest.skip('test requires regex' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Optional[Any] ): try: import elasticsearch # noqa except ImportError: __snake_case : Tuple = unittest.skip('test requires elasticsearch' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Dict ): try: import sqlalchemy # noqa except ImportError: __snake_case : Dict = unittest.skip('test requires sqlalchemy' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ): if not config.TORCH_AVAILABLE: __snake_case : Optional[int] = unittest.skip('test requires PyTorch' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Any ): if not config.TF_AVAILABLE: __snake_case : Optional[Any] = 
unittest.skip('test requires TensorFlow' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : List[str] ): if not config.JAX_AVAILABLE: __snake_case : int = unittest.skip('test requires JAX' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Tuple ): if not config.PIL_AVAILABLE: __snake_case : Any = unittest.skip('test requires Pillow' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ): try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(__UpperCAmelCase ) else: return test_case def UpperCAmelCase__( __UpperCAmelCase : Dict ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(__UpperCAmelCase ) else: return test_case def UpperCAmelCase__( __UpperCAmelCase : Tuple ): try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(__UpperCAmelCase ) else: return test_case def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ): def _require_spacy_model(__UpperCAmelCase : List[str] ): try: import spacy # noqa F401 spacy.load(__UpperCAmelCase ) except ImportError: return unittest.skip('test requires spacy' )(__UpperCAmelCase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(__UpperCAmelCase ) )(__UpperCAmelCase ) else: return test_case return _require_spacy_model def UpperCAmelCase__( __UpperCAmelCase : int ): try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(__UpperCAmelCase ) else: return test_case def UpperCAmelCase__( __UpperCAmelCase : List[str] ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(__UpperCAmelCase ) else: return test_case def UpperCAmelCase__( __UpperCAmelCase : Any ): if not _run_slow_tests or _run_slow_tests == 0: __snake_case : List[str] = unittest.skip('test is slow' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : Dict ): if not _run_local_tests or _run_local_tests == 0: __snake_case : Tuple = unittest.skip('test is local' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : int ): if not _run_packaged_tests or _run_packaged_tests == 0: __snake_case : Dict = unittest.skip('test is packaged' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( __UpperCAmelCase : str ): if not _run_remote_tests or _run_remote_tests == 0: __snake_case : Tuple = unittest.skip('test requires remote' )(__UpperCAmelCase ) return test_case def UpperCAmelCase__( *__UpperCAmelCase : Any ): def decorate(cls : List[str] ): for name, fn in cls.__dict__.items(): if callable(__UpperCAmelCase ) and name.startswith('test' ): for decorator in decorators: __snake_case : Optional[Any] = decorator(__UpperCAmelCase ) setattr(cls , __UpperCAmelCase , __UpperCAmelCase ) return cls return decorate class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" pass class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = 0 __UpperCAmelCase = 1 __UpperCAmelCase = 2 @contextmanager def UpperCAmelCase__( __UpperCAmelCase : Union[str, Any]=OfflineSimulationMode.CONNECTION_FAILS , __UpperCAmelCase : List[Any]=1E-16 ): __snake_case : Optional[Any] = requests.Session().request def timeout_request(__UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , **__UpperCAmelCase : Union[str, Any] ): # Change the url to an 
invalid url so that the connection hangs __snake_case : int = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" ) __snake_case : str = timeout try: return online_request(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __snake_case : Any = url __snake_case : Union[str, Any] = e.args[0] __snake_case : int = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),) __snake_case : str = (max_retry_error,) raise def raise_connection_error(__UpperCAmelCase : str , __UpperCAmelCase : Dict , **__UpperCAmelCase : List[str] ): raise requests.ConnectionError('Offline mode is enabled.' , request=__UpperCAmelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , __UpperCAmelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , __UpperCAmelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , __UpperCAmelCase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def UpperCAmelCase__( *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : int ): __snake_case : Dict = str(Path().resolve() ) with tempfile.TemporaryDirectory(*__UpperCAmelCase , **__UpperCAmelCase ) as tmp_dir: try: os.chdir(__UpperCAmelCase ) yield finally: os.chdir(__UpperCAmelCase ) @contextmanager def UpperCAmelCase__( ): import gc gc.collect() __snake_case : Any = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def UpperCAmelCase__( ): import gc gc.collect() __snake_case : int = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] ): return deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() == deepcopy(__UpperCAmelCase ).integers(0 , 1_00 , 10 ).tolist() def UpperCAmelCase__( __UpperCAmelCase : List[str] ): import decorator from requests.exceptions import HTTPError def _wrapper(__UpperCAmelCase : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ): try: return func(*__UpperCAmelCase , **__UpperCAmelCase ) except HTTPError as err: if str(__UpperCAmelCase ).startswith('500' ) or str(__UpperCAmelCase ).startswith('502' ): pytest.xfail(str(__UpperCAmelCase ) ) raise err return decorator.decorator(_wrapper , __UpperCAmelCase ) class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : int = returncode __snake_case : Tuple = stdout __snake_case : List[Any] = stderr async def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] ): while True: __snake_case : Optional[int] = await stream.readline() if line: callback(__UpperCAmelCase ) else: break async def UpperCAmelCase__( __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict=None , __UpperCAmelCase : int=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : int=False ): if echo: print('\nRunning: ' , ' '.join(__UpperCAmelCase ) ) __snake_case : Tuple = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__UpperCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__UpperCAmelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __snake_case : Any = [] __snake_case : Tuple = [] def tee(__UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]="" ): __snake_case : int = line.decode('utf-8' ).rstrip() sink.append(__UpperCAmelCase ) if not quiet: print(__UpperCAmelCase , __UpperCAmelCase , file=__UpperCAmelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda __UpperCAmelCase : tee(__UpperCAmelCase , __UpperCAmelCase , sys.stderr , label='stderr:' ) ), ] , timeout=__UpperCAmelCase , ) return _RunOutput(await p.wait() , __UpperCAmelCase , __UpperCAmelCase ) def UpperCAmelCase__( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=1_80 , __UpperCAmelCase : Any=False , __UpperCAmelCase : int=True ): __snake_case : Any = asyncio.get_event_loop() __snake_case : List[str] = loop.run_until_complete( _stream_subprocess(__UpperCAmelCase , env=__UpperCAmelCase , stdin=__UpperCAmelCase , timeout=__UpperCAmelCase , quiet=__UpperCAmelCase , echo=__UpperCAmelCase ) ) __snake_case : Dict = ' '.join(__UpperCAmelCase ) if result.returncode > 0: __snake_case : List[Any] = '\n'.join(result.stderr ) raise RuntimeError( F"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" F"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"""'{cmd_str}' produced no output.""" ) return result def UpperCAmelCase__( ): __snake_case : List[str] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __snake_case : Optional[Any] = re.sub(r'^gw' , '' , __UpperCAmelCase , 0 , re.M ) return int(__UpperCAmelCase ) def UpperCAmelCase__( ): __snake_case : Dict = 2_95_00 __snake_case : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
679
1
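The integration test in the row above uses a negative lookahead so that `open(...)` calls are flagged only when they pass neither an `encoding=` argument nor a binary/write mode. A small demonstration of that regex on synthetic one-line inputs:

import re

pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
assert pattern.search(" open('data.txt')") is not None  # flagged: no encoding
assert pattern.search(" open('data.txt', encoding='utf-8')") is None  # allowed
assert pattern.search(" open('data.bin', 'rb')") is None  # allowed: binary mode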
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline __UpperCAmelCase = ["image_embeds", "negative_image_embeds", "image", "hint"] __UpperCAmelCase = ["image_embeds", "negative_image_embeds", "image", "hint"] __UpperCAmelCase = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __UpperCAmelCase = False @property def lowercase_ ( self ): return 32 @property def lowercase_ ( self ): return 32 @property def lowercase_ ( self ): return self.time_input_dim @property def lowercase_ ( self ): return self.time_input_dim * 4 @property def lowercase_ ( self ): return 100 @property def lowercase_ ( self ): torch.manual_seed(0 ) __snake_case : List[str] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __snake_case : str = UNetaDConditionModel(**_UpperCAmelCase ) return model @property def lowercase_ ( self ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowercase_ ( self ): torch.manual_seed(0 ) __snake_case : int = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self ): __snake_case : Union[str, Any] = self.dummy_unet __snake_case : Dict = self.dummy_movq __snake_case : Optional[Any] = { 'num_train_timesteps': 1_000, 'beta_schedule': 'linear', 'beta_start': 0.00085, 'beta_end': 0.012, 'clip_sample': False, 'set_alpha_to_one': False, 'steps_offset': 0, 'prediction_type': 'epsilon', 'thresholding': False, } __snake_case : Dict = DDIMScheduler(**_UpperCAmelCase ) __snake_case : int = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ): __snake_case : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) __snake_case : Any = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(seed + 1 ) ).to( _UpperCAmelCase ) # create init_image __snake_case : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) __snake_case : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case : Any = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert('RGB' ).resize((256, 256) ) # create hint __snake_case : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) if str(_UpperCAmelCase ).startswith('mps' ): __snake_case : Any = torch.manual_seed(_UpperCAmelCase ) else: __snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) __snake_case : Dict = { 'image': init_image, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 10, 'guidance_scale': 7.0, 'strength': 0.2, 'output_type': 'np', } return inputs def lowercase_ ( self ): __snake_case : Optional[Any] = 'cpu' __snake_case : Optional[Any] = self.get_dummy_components() __snake_case : List[Any] = self.pipeline_class(**_UpperCAmelCase ) __snake_case : List[str] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __snake_case : List[Any] = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) ) __snake_case : Optional[Any] = output.images __snake_case : Dict = pipe( **self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0] __snake_case : str = image[0, -3:, -3:, -1] __snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case : List[Any] = np.array( [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase): """simple docstring""" def lowercase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ): __snake_case : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' ) __snake_case : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __snake_case : str = init_image.resize((512, 512) ) __snake_case : Any = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) __snake_case : List[str] = torch.from_numpy(np.array(_UpperCAmelCase ) ).float() / 255.0 __snake_case : Any = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __snake_case : int = 'A robot, 4k photo' __snake_case : List[str] = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(_UpperCAmelCase ) __snake_case : List[str] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa ) __snake_case : List[str] = pipeline.to(_UpperCAmelCase ) 
pipeline.set_progress_bar_config(disable=_UpperCAmelCase ) __snake_case : Tuple = torch.Generator(device='cpu' ).manual_seed(0 ) __snake_case , __snake_case : Union[str, Any] = pipe_prior( _UpperCAmelCase , image=_UpperCAmelCase , strength=0.85 , generator=_UpperCAmelCase , negative_prompt='' , ).to_tuple() __snake_case : str = pipeline( image=_UpperCAmelCase , image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , hint=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , ) __snake_case : Optional[Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
679
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list; `top` points at the newest node."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
679
1
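Usage sketch for the linked-list stack in the row above (assuming its class is in scope as `LinkedStack`); iteration walks from the top, so `str()` prints the newest item first:

stack = LinkedStack()
for item in (1, 2, 3):
    stack.push(item)
assert str(stack) == "3->2->1"
assert stack.peek() == 3
assert stack.pop() == 3
assert len(stack) == 2 and not stack.is_empty()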
def solution(min_total: int = 10**12) -> int:
    """Number of blue discs in the first arrangement with more than min_total discs."""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
679
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = ShapEPipeline __UpperCAmelCase = ["prompt"] __UpperCAmelCase = ["prompt"] __UpperCAmelCase = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] __UpperCAmelCase = False @property def lowercase_ ( self ): return 32 @property def lowercase_ ( self ): return 32 @property def lowercase_ ( self ): return self.time_input_dim * 4 @property def lowercase_ ( self ): return 8 @property def lowercase_ ( self ): __snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def lowercase_ ( self ): torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModelWithProjection(_UpperCAmelCase ) @property def lowercase_ ( self ): torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __snake_case : Dict = PriorTransformer(**_UpperCAmelCase ) return model @property def lowercase_ ( self ): torch.manual_seed(0 ) __snake_case : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __snake_case : Union[str, Any] = ShapERenderer(**_UpperCAmelCase ) return model def lowercase_ ( self ): __snake_case : Tuple = self.dummy_prior __snake_case : Dict = self.dummy_text_encoder __snake_case : Optional[int] = self.dummy_tokenizer __snake_case : str = self.dummy_renderer __snake_case : Tuple = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , ) __snake_case : Optional[int] = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=0 ): if str(_UpperCAmelCase ).startswith('mps' ): __snake_case : Union[str, Any] = torch.manual_seed(_UpperCAmelCase ) else: __snake_case : int = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) __snake_case : Tuple = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 
'np', } return inputs def lowercase_ ( self ): __snake_case : Optional[int] = 'cpu' __snake_case : Tuple = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**_UpperCAmelCase ) __snake_case : Any = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __snake_case : Any = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) ) __snake_case : Union[str, Any] = output.images[0] __snake_case : Tuple = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __snake_case : Dict = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowercase_ ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase_ ( self ): __snake_case : List[str] = torch_device == 'cpu' __snake_case : int = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , ) def lowercase_ ( self ): __snake_case : Dict = self.get_dummy_components() __snake_case : Any = self.pipeline_class(**_UpperCAmelCase ) __snake_case : Tuple = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __snake_case : int = 1 __snake_case : Optional[int] = 2 __snake_case : List[Any] = self.get_dummy_inputs(_UpperCAmelCase ) for key in inputs.keys(): if key in self.batch_params: __snake_case : Union[str, Any] = batch_size * [inputs[key]] __snake_case : Any = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __SCREAMING_SNAKE_CASE ( unittest.TestCase): """simple docstring""" def lowercase_ ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ): __snake_case : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __snake_case : Any = ShapEPipeline.from_pretrained('openai/shap-e' ) __snake_case : List[str] = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __snake_case : Optional[Any] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) __snake_case : Optional[Any] = pipe( 'a shark' , generator=_UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
679
1
from packaging import version

from .import_utils import is_accelerate_available


if is_accelerate_available():
    import accelerate


def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
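# Usage sketch for apply_forward_hook above (the decorated class and method
# are illustrative, not defined here): it wraps model entry points such as an
# autoencoder's encode/decode so that, on accelerate >= 0.17.0, the offload
# hook's pre_forward moves weights onto the right device before the method
# body runs. On older accelerate versions the method is returned unchanged.
#
#   class MyAutoencoder(ModelMixin):
#       @apply_forward_hook
#       def encode(self, x):
#           ...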
679
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
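# Hypothetical invocation of the conversion script above (all paths are
# placeholders):
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output_dir
#
# The script builds a T5ForConditionalGeneration from the JSON config, copies
# the TensorFlow checkpoint weights into it, and saves the result with
# save_pretrained().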
679
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ): __snake_case : List[Any] = parent __snake_case : str = batch_size __snake_case : str = image_size __snake_case : str = num_channels __snake_case : Dict = embeddings_size __snake_case : Dict = hidden_sizes __snake_case : Optional[Any] = depths __snake_case : List[str] = is_training __snake_case : Union[str, Any] = use_labels __snake_case : Optional[int] = hidden_act __snake_case : Optional[int] = num_labels __snake_case : Tuple = scope __snake_case : Union[str, Any] = len(_UpperCAmelCase ) def lowercase_ ( self ): __snake_case : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Optional[int] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Any = self.get_config() return config, pixel_values, labels def lowercase_ ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : str = TFResNetModel(config=_UpperCAmelCase ) __snake_case : Any = model(_UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : List[Any] = self.num_labels __snake_case : List[str] = TFResNetForImageClassification(_UpperCAmelCase ) __snake_case : Optional[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self ): __snake_case : Dict = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs __snake_case : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () __UpperCAmelCase = ( {"feature-extraction": TFResNetModel, "image-classification": 
TFResNetForImageClassification} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def lowercase_ ( self ): __snake_case : Any = TFResNetModelTester(self ) __snake_case : str = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def lowercase_ ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowercase_ ( self ): return @unittest.skip(reason='ResNet does not use inputs_embeds' ) def lowercase_ ( self ): pass @unittest.skip(reason='ResNet does not support input and output embeddings' ) def lowercase_ ( self ): pass def lowercase_ ( self ): __snake_case , __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : List[str] = model_class(_UpperCAmelCase ) __snake_case : Union[str, Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : str = [*signature.parameters.keys()] __snake_case : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowercase_ ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : int = model_class(_UpperCAmelCase ) __snake_case : Tuple = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __snake_case : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : int = self.model_tester.num_stages self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : int = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: __snake_case : Tuple = layer_type __snake_case : Optional[int] = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Dict = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def lowercase_ ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[Any] = TFResNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def UpperCAmelCase__( ): __snake_case : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class __SCREAMING_SNAKE_CASE ( 
unittest.TestCase): """simple docstring""" @cached_property def lowercase_ ( self ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowercase_ ( self ): __snake_case : Optional[Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __snake_case : Optional[int] = self.default_image_processor __snake_case : Any = prepare_img() __snake_case : Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors='tf' ) # forward pass __snake_case : List[Any] = model(**_UpperCAmelCase ) # verify the logits __snake_case : Optional[Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __snake_case : Tuple = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1E-4 ) )
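# Hedged standalone sketch of the integration flow the slow test above checks.
# The checkpoint id is the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
# ("microsoft/resnet-50" at the time of writing -- treat that id as an assumption).
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])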
679
import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) __magic_name__ = logging.getLogger(__name__) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None ): __snake_case : List[Any] = self.layer[current_layer](_UpperCAmelCase , _UpperCAmelCase , head_mask[current_layer] ) __snake_case : Optional[Any] = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCamelCase , ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" def __init__( self , _UpperCAmelCase ): super().__init__(_UpperCAmelCase ) __snake_case : List[Any] = BertEncoderWithPabee(_UpperCAmelCase ) self.init_weights() __snake_case : str = 0 __snake_case : List[str] = 0 __snake_case : int = 0 __snake_case : Tuple = 0 def lowercase_ ( self , _UpperCAmelCase ): __snake_case : Dict = threshold def lowercase_ ( self , _UpperCAmelCase ): __snake_case : List[Any] = patience def lowercase_ ( self ): __snake_case : Dict = 0 __snake_case : Dict = 0 def lowercase_ ( self ): __snake_case : Union[str, Any] = self.inference_layers_num / self.inference_instances_num __snake_case : int = ( F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(_UpperCAmelCase ) @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=False , ): if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: __snake_case : Union[str, Any] = input_ids.size() elif inputs_embeds is not None: __snake_case : int = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) __snake_case : Optional[Any] = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __snake_case : List[str] = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) if token_type_ids is None: __snake_case : int = torch.zeros(_UpperCAmelCase , dtype=torch.long , device=_UpperCAmelCase ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__snake_case : torch.Tensor = self.get_extended_attention_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __snake_case , __snake_case , __snake_case : Optional[int] = encoder_hidden_states.size() __snake_case : List[Any] = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __snake_case : Tuple = torch.ones(_UpperCAmelCase , device=_UpperCAmelCase ) __snake_case : Optional[int] = self.invert_attention_mask(_UpperCAmelCase ) else: __snake_case : str = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __snake_case : int = self.get_head_mask(_UpperCAmelCase , self.config.num_hidden_layers ) __snake_case : Any = self.embeddings( input_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase ) __snake_case : List[str] = embedding_output if self.training: __snake_case : Dict = [] for i in range(self.config.num_hidden_layers ): __snake_case : str = self.encoder.adaptive_forward( _UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase ) __snake_case : Optional[Any] = self.pooler(_UpperCAmelCase ) __snake_case : Any = output_layers[i](output_dropout(_UpperCAmelCase ) ) res.append(_UpperCAmelCase ) elif self.patience == 0: # Use all layers for inference __snake_case : Dict = self.encoder( _UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , ) __snake_case : str = self.pooler(encoder_outputs[0] ) __snake_case : Tuple = [output_layers[self.config.num_hidden_layers - 1](_UpperCAmelCase )] else: __snake_case : List[str] = 0 __snake_case : str = None __snake_case : Tuple = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __snake_case : List[Any] = self.encoder.adaptive_forward( _UpperCAmelCase , current_layer=_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase ) __snake_case : Any = self.pooler(_UpperCAmelCase ) __snake_case : int = output_layers[i](_UpperCAmelCase ) if regression: __snake_case : Optional[int] = logits.detach() if patient_result is not None: __snake_case : Dict = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __snake_case : Any = 0 else: __snake_case : str = logits.detach().argmax(dim=1 ) if patient_result is not None: __snake_case : List[str] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(_UpperCAmelCase ) ): patient_counter += 1 else: __snake_case : Dict = 0 __snake_case : str = logits if patient_counter == self.patience: break __snake_case : str = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , UpperCamelCase , ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" def __init__( self , _UpperCAmelCase ): super().__init__(_UpperCAmelCase ) __snake_case : List[str] = config.num_labels __snake_case : Dict = BertModelWithPabee(_UpperCAmelCase ) __snake_case : int = nn.Dropout(config.hidden_dropout_prob ) __snake_case : Optional[int] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(_UpperCAmelCase ) def lowercase_ ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ): __snake_case : List[str] = self.bert( input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __snake_case : int = (logits[-1],) if labels is not None: __snake_case : List[Any] = None __snake_case : Optional[int] = 0 for ix, logits_item in enumerate(_UpperCAmelCase ): if self.num_labels == 1: # We are doing regression __snake_case : List[str] = MSELoss() __snake_case : List[str] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __snake_case : List[str] = CrossEntropyLoss() __snake_case : Optional[int] = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __snake_case : List[Any] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __snake_case : int = (total_loss / total_weights,) + outputs return outputs
679
1
from __future__ import annotations


def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not be greater than number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list


if __name__ == "__main__":
    import doctest

    doctest.testmod()
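# Worked examples for allocation_num above: each partition receives
# number_of_bytes // partitions bytes, and the last partition absorbs any
# remainder.
#
#   allocation_num(16, 4)  ->  ['1-4', '5-8', '9-12', '13-16']
#   allocation_num(10, 3)  ->  ['1-3', '4-6', '7-10']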
679
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
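# Quick examples for is_isogram above (case-insensitive, letters only):
#
#   is_isogram("Uncopyrightable")  ->  True   (no letter repeats)
#   is_isogram("isograms")         ->  False  (the letter 's' repeats)
#   is_isogram("abc1")             ->  ValueError (non-alphabetic character)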
679
1
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = '''▁''' __magic_name__ = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } __magic_name__ = { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } __magic_name__ = { '''facebook/s2t-small-librispeech-asr''': 1_024, } __magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] __magic_name__ = {'''mustc''': MUSTC_LANGS} class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = MAX_MODEL_INPUT_SIZES __UpperCAmelCase = ["input_ids", "attention_mask"] __UpperCAmelCase = [] def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ): __snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) __snake_case : Dict = do_upper_case __snake_case : Optional[Any] = do_lower_case __snake_case : List[Any] = load_json(_UpperCAmelCase ) __snake_case : Dict = {v: k for k, v in self.encoder.items()} __snake_case : Optional[Any] = spm_file __snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs ) if lang_codes is not None: __snake_case : Optional[Any] = lang_codes __snake_case : int = LANGUAGES[lang_codes] __snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs] __snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs} __snake_case : Dict = self.lang_tokens __snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __snake_case : Optional[int] = {} @property def lowercase_ ( self ): return len(self.encoder ) @property def lowercase_ ( self ): return self._tgt_lang @tgt_lang.setter def lowercase_ ( self , _UpperCAmelCase ): __snake_case : str = new_tgt_lang self.set_tgt_lang_special_tokens(_UpperCAmelCase ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : Tuple = self.lang_code_to_id[tgt_lang] __snake_case : Optional[Any] = [lang_code_id] def lowercase_ ( self , _UpperCAmelCase ): return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def lowercase_ ( self , _UpperCAmelCase ): return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] ) def lowercase_ ( self , _UpperCAmelCase ): return self.decoder.get(_UpperCAmelCase , self.unk_token ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : str = [] __snake_case : Any = '' for 
token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __snake_case : Dict = self.sp_model.decode(_UpperCAmelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " __snake_case : Any = [] else: current_sub_tokens.append(_UpperCAmelCase ) __snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) __snake_case : Union[str, Any] = [1] * len(self.prefix_tokens ) __snake_case : Optional[Any] = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def lowercase_ ( self ): __snake_case : List[Any] = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): __snake_case : int = self.__dict__.copy() __snake_case : str = None return state def __setstate__( self , _UpperCAmelCase ): __snake_case : List[Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __snake_case : Optional[int] = {} __snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ): __snake_case : str = Path(_UpperCAmelCase ) assert save_dir.is_dir(), F"""{save_directory} should be a directory""" __snake_case : int = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) __snake_case : Union[str, Any] = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , _UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(_UpperCAmelCase , 'wb' ) as fi: __snake_case : List[str] = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (str(_UpperCAmelCase ), str(_UpperCAmelCase )) def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ): __snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase ) spm.Load(str(__UpperCAmelCase ) ) return spm def UpperCAmelCase__( __UpperCAmelCase : str ): with open(__UpperCAmelCase , 'r' ) as f: return json.load(__UpperCAmelCase ) def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ): with open(__UpperCAmelCase , 'w' ) as f: json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
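# Hedged usage sketch for the tokenizer above, with the checkpoint named in
# its own PRETRAINED_VOCAB_FILES_MAP; requires the `sentencepiece` package.
from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids  # eos_token_id is appended automatically
text = tokenizer.decode(ids, skip_special_tokens=True)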
679
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
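# Minimal sketch: the config above instantiates with BERT-base-like defaults
# that can be overridden selectively. (RetriBERT is a deprecated model family,
# so the public import path may vary across transformers versions.)
from transformers import RetriBertConfig

config = RetriBertConfig(projection_dim=64, share_encoders=False)
assert config.model_type == "retribert"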
679
1
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
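# Expected output of pretty_print(3) -- a diamond built from the upper and
# lower Floyd halves (trailing spaces omitted here):
#
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *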
679
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ = { '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''], '''tokenization_biogpt''': ['''BioGptTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BioGptForCausalLM''', '''BioGptForTokenClassification''', '''BioGptForSequenceClassification''', '''BioGptModel''', '''BioGptPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
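# Effect of the _LazyModule pattern above: importing the package is cheap
# because torch-backed symbols are only materialized on first attribute
# access, e.g.
#
#   from transformers import BioGptForCausalLM  # real import happens here
#
# and when torch is unavailable the except branch simply leaves the model
# classes out of _import_structure, so the package itself still imports cleanly.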
679
1
class Graph:  # Public class to implement a graph
    def __init__(self, row, col, graph):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i, j, visited):
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i, j, visited):
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
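# Usage example for the flood-fill island counter above (8-directional
# connectivity); this grid contains five separate groups of 1s:
if __name__ == "__main__":
    matrix = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    g = Graph(row=5, col=5, graph=matrix)
    print(g.count_islands())  # -> 5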
679
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __SCREAMING_SNAKE_CASE ( UpperCamelCase): """simple docstring""" def lowercase_ ( self ): __snake_case : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(_UpperCAmelCase , 'num_attention_heads' ) ) class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=640 , _UpperCAmelCase=4 , _UpperCAmelCase="silu" , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=10 , _UpperCAmelCase=None , ): __snake_case : List[str] = parent __snake_case : Tuple = batch_size __snake_case : str = image_size __snake_case : Union[str, Any] = patch_size __snake_case : Optional[int] = num_channels __snake_case : List[str] = last_hidden_size __snake_case : Optional[Any] = num_attention_heads __snake_case : Dict = hidden_act __snake_case : List[Any] = conv_kernel_size __snake_case : int = output_stride __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : Dict = attention_probs_dropout_prob __snake_case : Any = classifier_dropout_prob __snake_case : str = use_labels __snake_case : Optional[Any] = is_training __snake_case : Dict = num_labels __snake_case : str = initializer_range __snake_case : Union[str, Any] = scope def lowercase_ ( self ): __snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : str = None __snake_case : Dict = None if self.use_labels: __snake_case : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def lowercase_ ( self ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : List[Any] = MobileViTModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() 
__snake_case : List[Any] = model(_UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : Tuple = self.num_labels __snake_case : Tuple = MobileViTForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __snake_case : Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : Optional[Any] = self.num_labels __snake_case : int = MobileViTForSemanticSegmentation(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __snake_case : Tuple = model(_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __snake_case : List[Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowercase_ ( self ): __snake_case : Optional[int] = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Any = config_and_inputs __snake_case : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase): """simple docstring""" __UpperCAmelCase = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) __UpperCAmelCase = ( { "feature-extraction": MobileViTModel, "image-classification": MobileViTForImageClassification, "image-segmentation": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def lowercase_ ( self ): __snake_case : Dict = MobileViTModelTester(self ) __snake_case : str = MobileViTConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase ) def lowercase_ ( self ): self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def lowercase_ ( self ): pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def lowercase_ ( self ): pass @unittest.skip(reason='MobileViT does not output attentions' ) def lowercase_ ( self ): pass def lowercase_ ( self ): __snake_case , __snake_case : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Tuple = model_class(_UpperCAmelCase ) __snake_case : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : List[str] = [*signature.parameters.keys()] __snake_case : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self ): pass def lowercase_ ( self ): __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowercase_ ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : str = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): __snake_case : str = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) __snake_case : Optional[Any] = outputs.hidden_states __snake_case : str = 5 self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __snake_case : Optional[Any] = 2 for i in range(len(_UpperCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Dict = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Tuple = True check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) def lowercase_ ( self ): __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase ) @slow def lowercase_ ( self ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Any = MobileViTModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def UpperCAmelCase__( ): __snake_case : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase): """simple docstring""" @cached_property def lowercase_ ( self ): return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def lowercase_ ( self ): __snake_case : Tuple = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(_UpperCAmelCase ) __snake_case : Union[str, Any] = self.default_image_processor __snake_case : str = prepare_img() __snake_case : Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __snake_case : Tuple = model(**_UpperCAmelCase ) # verify the logits __snake_case : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) __snake_case : Any = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowercase_ ( self ): __snake_case : int = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : str = model.to(_UpperCAmelCase ) __snake_case : List[Any] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : 
Optional[int] = prepare_img() __snake_case : Tuple = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __snake_case : int = model(**_UpperCAmelCase ) __snake_case : int = outputs.logits # verify the logits __snake_case : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _UpperCAmelCase ) __snake_case : Optional[int] = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], ] , device=_UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowercase_ ( self ): __snake_case : str = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : str = model.to(_UpperCAmelCase ) __snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : Any = prepare_img() __snake_case : Optional[int] = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): __snake_case : Optional[Any] = model(**_UpperCAmelCase ) __snake_case : str = outputs.logits.detach().cpu() __snake_case : Dict = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(50, 60)] ) __snake_case : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _UpperCAmelCase ) __snake_case : Tuple = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase ) __snake_case : List[str] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
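# Hedged standalone sketch of the segmentation flow the slow tests above
# exercise, using the same public checkpoint:
import torch
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# Raw logits come out at low resolution: (1, 21, 32, 32).
segmentation = processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)[0]  # per-pixel class ids at the original image size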
679
1
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple):
    return x[0]


def get_frequency_order(message: str):
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str):
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
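# Usage sketch: when breaking a simple substitution cipher, candidate
# decryptions can be ranked by how English-like their letter frequencies are.
# The match score is an integer from 0 to 12 (6 most-common plus 6
# least-common English letters).
#
#   order = get_frequency_order(candidate_plaintext)       # letters, most to least frequent
#   score = english_freq_match_score(candidate_plaintext)  # higher = more English-like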
679
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")
679
1
import numpy class __SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase ): __snake_case : List[str] = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. __snake_case : Tuple = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. __snake_case : Any = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. __snake_case : Union[str, Any] = numpy.random.rand(3 , 1 ) # Real output values provided. __snake_case : Tuple = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. __snake_case : Dict = numpy.zeros(output_array.shape ) def lowercase_ ( self ): __snake_case : List[Any] = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. __snake_case : Optional[int] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. __snake_case : int = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowercase_ ( self ): __snake_case : Optional[Any] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) __snake_case : int = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) __snake_case : int = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): for iteration in range(1 , iterations + 1 ): __snake_case : Tuple = self.feedforward() self.back_propagation() if give_loss: __snake_case : int = numpy.mean(numpy.square(output - 
self.feedforward() ) ) print(F"""Iteration {iteration} Loss: {loss}""" ) def lowercase_ ( self , _UpperCAmelCase ): __snake_case : Dict = input_arr __snake_case : List[Any] = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) __snake_case : List[str] = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) __snake_case : Union[str, Any] = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def UpperCAmelCase__( __UpperCAmelCase : numpy.ndarray ): return 1 / (1 + numpy.exp(-value )) def UpperCAmelCase__( __UpperCAmelCase : numpy.ndarray ): return (value) * (1 - (value)) def UpperCAmelCase__( ): __snake_case : List[Any] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. __snake_case : Dict = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. __snake_case : Union[str, Any] = TwoHiddenLayerNeuralNetwork( input_array=__UpperCAmelCase , output_array=__UpperCAmelCase ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=__UpperCAmelCase , iterations=10 , give_loss=__UpperCAmelCase ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
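# Quick sanity check for the activation helpers above: sigmoid_derivative
# takes an already-activated value and returns value * (1 - value), which
# should match a finite-difference estimate of d/dx sigmoid(x).
import numpy


def check_sigmoid_derivative(x: float = 0.3, eps: float = 1e-6) -> None:
    s = 1 / (1 + numpy.exp(-x))
    analytic = s * (1 - s)  # == sigmoid_derivative(sigmoid(x))
    numeric = (1 / (1 + numpy.exp(-(x + eps))) - 1 / (1 + numpy.exp(-(x - eps)))) / (2 * eps)
    assert abs(analytic - numeric) < 1e-8


check_sigmoid_derivative()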
679
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union

import datasets
import numpy as np
import torch
from datasets import load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

__magic_name__ = logging.getLogger(__name__)

@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"})
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"})
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"})
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    __UpperCAmelCase = field(
        default="main" ,
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )

@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = field(default=UpperCamelCase , metadata={"help": "The input training data file (a text file)."})
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"})
    __UpperCAmelCase = field(
        default=UpperCamelCase , metadata={"help": "The number of processes to use for the preprocessing."} , )
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    __UpperCAmelCase = field(
        default=UpperCamelCase ,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def lowercase_ ( self ):
        if self.train_file is not None:
            __snake_case : Union[str, Any] = self.train_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            __snake_case : List[str] = self.validation_file.split('.' )[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."

@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = 42
    __UpperCAmelCase = True
    __UpperCAmelCase = None
    __UpperCAmelCase = None

    def __call__( self , _UpperCAmelCase ):
        __snake_case : Tuple = 'label' if 'label' in features[0].keys() else 'labels'
        __snake_case : Dict = [feature.pop(_UpperCAmelCase ) for feature in features]
        __snake_case : List[Any] = len(_UpperCAmelCase )
        __snake_case : Union[str, Any] = len(features[0]['input_ids'] )
        __snake_case : Union[str, Any] = [
            [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features
        ]
        __snake_case : Union[str, Any] = list(chain(*_UpperCAmelCase ) )

        __snake_case : Optional[Any] = self.tokenizer.pad(
            _UpperCAmelCase ,
            padding=self.padding ,
            max_length=self.max_length ,
            pad_to_multiple_of=self.pad_to_multiple_of ,
            return_tensors='pt' , )

        # Un-flatten
        __snake_case : Any = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()}
        # Add back labels
        __snake_case : int = torch.tensor(_UpperCAmelCase , dtype=torch.int64 )
        return batch

def UpperCAmelCase__( ):
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    __snake_case : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __snake_case , __snake_case , __snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __snake_case , __snake_case , __snake_case : Dict = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , __UpperCAmelCase , __UpperCAmelCase )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,
        datefmt='%m/%d/%Y %H:%M:%S' ,
        handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    __snake_case : Tuple = training_args.get_process_log_level()
    logger.setLevel(__UpperCAmelCase )
    datasets.utils.logging.set_verbosity(__UpperCAmelCase )
    transformers.utils.logging.set_verbosity(__UpperCAmelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    __snake_case : Dict = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        __snake_case : str = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                'Use --overwrite_output_dir to overcome.' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).

    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        __snake_case : Optional[int] = {}
        if data_args.train_file is not None:
            __snake_case : Optional[int] = data_args.train_file
        if data_args.validation_file is not None:
            __snake_case : int = data_args.validation_file
        __snake_case : int = data_args.train_file.split('.' )[-1]
        __snake_case : Tuple = load_dataset(
            __UpperCAmelCase ,
            data_files=__UpperCAmelCase ,
            cache_dir=model_args.cache_dir ,
            use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        __snake_case : Optional[int] = load_dataset(
            'swag' ,
            'regular' ,
            cache_dir=model_args.cache_dir ,
            use_auth_token=True if model_args.use_auth_token else None , )
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer

    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __snake_case : List[Any] = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )
    __snake_case : str = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        use_fast=model_args.use_fast_tokenizer ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )
    __snake_case : List[Any] = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,
        config=__UpperCAmelCase ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    __snake_case : str = [F"""ending{i}""" for i in range(4 )]
    __snake_case : Optional[Any] = 'sent1'
    __snake_case : Tuple = 'sent2'

    if data_args.max_seq_length is None:
        __snake_case : List[Any] = tokenizer.model_max_length
        if max_seq_length > 10_24:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            __snake_case : List[Any] = 10_24
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
                F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
        __snake_case : str = min(data_args.max_seq_length , tokenizer.model_max_length )

    # Preprocessing the datasets.
    def preprocess_function(__UpperCAmelCase : Tuple ):
        __snake_case : Union[str, Any] = [[context] * 4 for context in examples[context_name]]
        __snake_case : Union[str, Any] = examples[question_header_name]
        __snake_case : Optional[int] = [
            [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(__UpperCAmelCase )
        ]

        # Flatten out
        __snake_case : Optional[Any] = list(chain(*__UpperCAmelCase ) )
        __snake_case : int = list(chain(*__UpperCAmelCase ) )

        # Tokenize
        __snake_case : Tuple = tokenizer(
            __UpperCAmelCase ,
            __UpperCAmelCase ,
            truncation=__UpperCAmelCase ,
            max_length=__UpperCAmelCase ,
            padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(__UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()}

    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        __snake_case : Optional[Any] = raw_datasets['train']
        if data_args.max_train_samples is not None:
            __snake_case : Tuple = min(len(__UpperCAmelCase ) , data_args.max_train_samples )
            __snake_case : List[str] = train_dataset.select(range(__UpperCAmelCase ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            __snake_case : int = train_dataset.map(
                __UpperCAmelCase ,
                batched=__UpperCAmelCase ,
                num_proc=data_args.preprocessing_num_workers ,
                load_from_cache_file=not data_args.overwrite_cache , )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        __snake_case : Optional[Any] = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            __snake_case : List[Any] = min(len(__UpperCAmelCase ) , data_args.max_eval_samples )
            __snake_case : Optional[Any] = eval_dataset.select(range(__UpperCAmelCase ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            __snake_case : List[Any] = eval_dataset.map(
                __UpperCAmelCase ,
                batched=__UpperCAmelCase ,
                num_proc=data_args.preprocessing_num_workers ,
                load_from_cache_file=not data_args.overwrite_cache , )

    # Data collator
    __snake_case : str = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCAmelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
    )

    # Metric
    def compute_metrics(__UpperCAmelCase : int ):
        __snake_case , __snake_case : Union[str, Any] = eval_predictions
        __snake_case : Tuple = np.argmax(__UpperCAmelCase , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}

    # Initialize our Trainer
    __snake_case : List[str] = Trainer(
        model=__UpperCAmelCase ,
        args=__UpperCAmelCase ,
        train_dataset=train_dataset if training_args.do_train else None ,
        eval_dataset=eval_dataset if training_args.do_eval else None ,
        tokenizer=__UpperCAmelCase ,
        data_collator=__UpperCAmelCase ,
        compute_metrics=__UpperCAmelCase , )

    # Training
    if training_args.do_train:
        __snake_case : Dict = None
        if training_args.resume_from_checkpoint is not None:
            __snake_case : Any = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            __snake_case : List[str] = last_checkpoint
        __snake_case : List[str] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        __snake_case : List[Any] = train_result.metrics

        __snake_case : Optional[Any] = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
        )
        __snake_case : Tuple = min(__UpperCAmelCase , len(__UpperCAmelCase ) )

        trainer.log_metrics('train' , __UpperCAmelCase )
        trainer.save_metrics('train' , __UpperCAmelCase )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        __snake_case : Dict = trainer.evaluate()
        __snake_case : Any = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
        __snake_case : Optional[Any] = min(__UpperCAmelCase , len(__UpperCAmelCase ) )

        trainer.log_metrics('eval' , __UpperCAmelCase )
        trainer.save_metrics('eval' , __UpperCAmelCase )

    __snake_case : List[Any] = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**__UpperCAmelCase )
    else:
        trainer.create_model_card(**__UpperCAmelCase )

def UpperCAmelCase__( __UpperCAmelCase : Dict ):
    # For xla_spawn (TPUs)
    main()

if __name__ == "__main__":
    main()
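The script above is the stock multiple-choice fine-tuning example (`run_swag`) and is normally driven from the command line. A minimal launch sketch follows; the model name, hyperparameters, and output path are illustrative assumptions, not values taken from the file:

import subprocess

# Launch the multiple-choice fine-tuning script with illustrative arguments;
# assumes run_swag.py (the file above) exists in the current directory.
subprocess.run(
    [
        "python", "run_swag.py",
        "--model_name_or_path", "bert-base-uncased",
        "--do_train",
        "--do_eval",
        "--per_device_train_batch_size", "16",
        "--learning_rate", "5e-5",
        "--num_train_epochs", "3",
        "--output_dir", "/tmp/swag_output",
        "--overwrite_output_dir",
    ],
    check=True,
)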
679
1
from ....configuration_utils import PretrainedConfig
from ....utils import logging

__magic_name__ = logging.get_logger(__name__)

# TODO: upload to AWS
__magic_name__ = {
    '''yjernite/retribert-base-uncased''': (
        '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
    ),
}

class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    __UpperCAmelCase = "retribert"

    def __init__( self , _UpperCAmelCase=30_522 , _UpperCAmelCase=768 , _UpperCAmelCase=8 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=True , _UpperCAmelCase=128 , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
        super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )

        __snake_case : Tuple = vocab_size
        __snake_case : Optional[int] = hidden_size
        __snake_case : str = num_hidden_layers
        __snake_case : List[Any] = num_attention_heads
        __snake_case : Any = hidden_act
        __snake_case : List[Any] = intermediate_size
        __snake_case : Dict = hidden_dropout_prob
        __snake_case : Optional[Any] = attention_probs_dropout_prob
        __snake_case : Optional[int] = max_position_embeddings
        __snake_case : List[str] = type_vocab_size
        __snake_case : Union[str, Any] = initializer_range
        __snake_case : Optional[Any] = layer_norm_eps
        __snake_case : int = share_encoders
        __snake_case : Optional[Any] = projection_dim
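A minimal instantiation sketch, assuming the class above corresponds to transformers' RetriBertConfig (model_type "retribert"); the override values are illustrative, not taken from a released checkpoint:

from transformers import RetriBertConfig

# Build a config with a few illustrative overrides; unspecified fields keep
# the defaults defined in the class above.
config = RetriBertConfig(hidden_size=768, num_attention_heads=8, projection_dim=128)
print(config.model_type)  # -> "retribert"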
679
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union

import sentencepiece

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

__magic_name__ = logging.get_logger(__name__)

__magic_name__ = '''▁'''

__magic_name__ = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}

__magic_name__ = {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}

__magic_name__ = {
    '''facebook/s2t-small-librispeech-asr''': 1_024,
}

__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

__magic_name__ = {'''mustc''': MUSTC_LANGS}

class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = MAX_MODEL_INPUT_SIZES
    __UpperCAmelCase = ["input_ids", "attention_mask"]
    __UpperCAmelCase = []

    def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
        __snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=_UpperCAmelCase ,
            eos_token=_UpperCAmelCase ,
            unk_token=_UpperCAmelCase ,
            pad_token=_UpperCAmelCase ,
            do_upper_case=_UpperCAmelCase ,
            do_lower_case=_UpperCAmelCase ,
            tgt_lang=_UpperCAmelCase ,
            lang_codes=_UpperCAmelCase ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **_UpperCAmelCase , )
        __snake_case : Dict = do_upper_case
        __snake_case : Optional[Any] = do_lower_case

        __snake_case : List[Any] = load_json(_UpperCAmelCase )
        __snake_case : Dict = {v: k for k, v in self.encoder.items()}
        __snake_case : Optional[Any] = spm_file
        __snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )

        if lang_codes is not None:
            __snake_case : Optional[Any] = lang_codes
            __snake_case : int = LANGUAGES[lang_codes]
            __snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
            __snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}

            __snake_case : Dict = self.lang_tokens
            __snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            __snake_case : Optional[int] = {}

    @property
    def lowercase_ ( self ):
        return len(self.encoder )

    @property
    def lowercase_ ( self ):
        return self._tgt_lang

    @tgt_lang.setter
    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : str = new_tgt_lang
        self.set_tgt_lang_special_tokens(_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Tuple = self.lang_code_to_id[tgt_lang]
        __snake_case : Optional[Any] = [lang_code_id]

    def lowercase_ ( self , _UpperCAmelCase ):
        return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase ):
        return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )

    def lowercase_ ( self , _UpperCAmelCase ):
        return self.decoder.get(_UpperCAmelCase , self.unk_token )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : str = []
        __snake_case : Any = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                __snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                __snake_case : Any = []
            else:
                current_sub_tokens.append(_UpperCAmelCase )
        __snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )

        __snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
        __snake_case : Optional[Any] = [1]
        if token_ids_a is None:
            return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
        return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        __snake_case : int = self.__dict__.copy()
        __snake_case : str = None
        return state

    def __setstate__( self , _UpperCAmelCase ):
        __snake_case : List[Any] = d

        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            __snake_case : Optional[int] = {}

        __snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
        __snake_case : str = Path(_UpperCAmelCase )
        assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
        __snake_case : int = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        __snake_case : Union[str, Any] = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )

        save_json(self.encoder , _UpperCAmelCase )

        if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , _UpperCAmelCase )
        elif not os.path.isfile(self.spm_file ):
            with open(_UpperCAmelCase , 'wb' ) as fi:
                __snake_case : List[str] = self.sp_model.serialized_model_proto()
                fi.write(_UpperCAmelCase )

        return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))

def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
    __snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
    spm.Load(str(__UpperCAmelCase ) )
    return spm

def UpperCAmelCase__( __UpperCAmelCase : str ):
    with open(__UpperCAmelCase , 'r' ) as f:
        return json.load(__UpperCAmelCase )

def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
    with open(__UpperCAmelCase , 'w' ) as f:
        json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
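A minimal round-trip sketch, assuming the class above is transformers' Speech2TextTokenizer and using the checkpoint named in the maps above; the sentencepiece package must be installed:

from transformers import Speech2TextTokenizer

# Encode a transcript to ids and decode back.
tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("a transcript to encode").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))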
679
1
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand

def UpperCAmelCase__( __UpperCAmelCase : str ):
    return EnvironmentCommand()

def UpperCAmelCase__( __UpperCAmelCase : Optional[int] ):
    return EnvironmentCommand(args.accelerate_config_file )

class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    @staticmethod
    def lowercase_ ( _UpperCAmelCase ):
        __snake_case : Any = parser.add_parser('env' )
        download_parser.set_defaults(func=_UpperCAmelCase )
        download_parser.add_argument(
            '--accelerate-config_file' ,
            default=_UpperCAmelCase ,
            help='The accelerate config file to use for the default values in the launching script.' , )
        download_parser.set_defaults(func=_UpperCAmelCase )

    def __init__( self , _UpperCAmelCase , *_UpperCAmelCase ):
        __snake_case : List[str] = accelerate_config_file

    def lowercase_ ( self ):
        __snake_case : Optional[int] = 'not installed'
        if is_safetensors_available():
            import safetensors

            __snake_case : List[Any] = safetensors.__version__
        elif importlib.util.find_spec('safetensors' ) is not None:
            import safetensors

            __snake_case : str = F"""{safetensors.__version__} but is ignored because of PyTorch version too old."""

        __snake_case : List[Any] = 'not installed'
        __snake_case : Tuple = 'not found'
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            __snake_case : List[str] = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(_UpperCAmelCase ):
                __snake_case : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()

            __snake_case : int = (
                '\n'.join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
                if isinstance(_UpperCAmelCase , _UpperCAmelCase )
                else F"""\t{accelerate_config}"""
            )

        __snake_case : Union[str, Any] = 'not installed'
        __snake_case : Union[str, Any] = 'NA'
        if is_torch_available():
            import torch

            __snake_case : str = torch.__version__
            __snake_case : Union[str, Any] = torch.cuda.is_available()

        __snake_case : Any = 'not installed'
        __snake_case : Union[str, Any] = 'NA'
        if is_tf_available():
            import tensorflow as tf

            __snake_case : Tuple = tf.__version__
            try:
                # deprecated in v2.1
                __snake_case : Dict = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                __snake_case : Tuple = bool(tf.config.list_physical_devices('GPU' ) )

        __snake_case : List[Any] = 'not installed'
        __snake_case : Any = 'not installed'
        __snake_case : int = 'not installed'
        __snake_case : Union[str, Any] = 'NA'
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            __snake_case : List[Any] = flax.__version__
            __snake_case : Optional[int] = jax.__version__
            __snake_case : str = jaxlib.__version__
            __snake_case : Optional[Any] = jax.lib.xla_bridge.get_backend().platform

        __snake_case : int = {
            '`transformers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'Huggingface_hub version': huggingface_hub.__version__,
            'Safetensors version': F"""{safetensors_version}""",
            'Accelerate version': F"""{accelerate_version}""",
            'Accelerate config': F"""{accelerate_config_str}""",
            'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
            'Tensorflow version (GPU?)': F"""{tf_version} ({tf_cuda_available})""",
            'Flax version (CPU?/GPU?/TPU?)': F"""{flax_version} ({jax_backend})""",
            'Jax version': F"""{jax_version}""",
            'JaxLib version': F"""{jaxlib_version}""",
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }

        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        print(self.format_dict(_UpperCAmelCase ) )

        return info

    @staticmethod
    def lowercase_ ( _UpperCAmelCase ):
        return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
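The class above backs the `env` subcommand of the Transformers CLI; it is usually reached from a shell rather than imported directly. A minimal sketch:

import subprocess

# Print the environment report assembled above; requires transformers to be
# installed so the `transformers-cli` entry point exists on PATH.
subprocess.run(["transformers-cli", "env"], check=True)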
679
def UpperCAmelCase__( __UpperCAmelCase : list ):
    __snake_case : List[Any] = len(__UpperCAmelCase )
    for _ in range(__UpperCAmelCase ):
        for i in range(_ % 2 , arr_size - 1 , 2 ):
            if arr[i + 1] < arr[i]:
                __snake_case , __snake_case : int = arr[i + 1], arr[i]

    return arr

if __name__ == "__main__":
    __magic_name__ = list(range(10, 0, -1))
    print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
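Odd-even transposition sort alternates between odd- and even-indexed neighbor comparisons, and n passes are enough to sort any n-element list. A quick property check (a sketch that assumes the function is bound to the `odd_even_transposition` name used in the demo above and that its swap correctly exchanges `arr[i]` and `arr[i + 1]`):

import random

# After n alternating passes the result should agree with Python's built-in sort.
data = [random.randint(0, 99) for _ in range(25)]
assert odd_even_transposition(list(data)) == sorted(data)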
679
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging

__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}

class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    __UpperCAmelCase = "rwkv"
    __UpperCAmelCase = {"max_position_embeddings": "context_length"}

    def __init__( self , _UpperCAmelCase=50_277 , _UpperCAmelCase=1_024 , _UpperCAmelCase=4_096 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=1E-5 , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=6 , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
        __snake_case : int = vocab_size
        __snake_case : Union[str, Any] = context_length
        __snake_case : Any = hidden_size
        __snake_case : str = num_hidden_layers
        __snake_case : int = attention_hidden_size if attention_hidden_size is not None else hidden_size
        __snake_case : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
        __snake_case : List[str] = layer_norm_epsilon
        __snake_case : List[str] = rescale_every
        __snake_case : Dict = use_cache

        __snake_case : str = bos_token_id
        __snake_case : List[Any] = eos_token_id

        super().__init__(
            tie_word_embeddings=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
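A minimal instantiation sketch, assuming the class above corresponds to transformers' RwkvConfig; the sizes are illustrative, not defaults of any released checkpoint:

from transformers import RwkvConfig

# attention_hidden_size falls back to hidden_size, and intermediate_size to
# 4 * hidden_size, when left as None - mirroring the __init__ above.
config = RwkvConfig(hidden_size=512, num_hidden_layers=8, context_length=1_024)
print(config.attention_hidden_size, config.intermediate_size)  # 512 2048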
679
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin

if is_torch_available():
    __magic_name__ = '''pt'''
elif is_tf_available():
    __magic_name__ = '''tf'''
else:
    __magic_name__ = '''jax'''

class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase):
    """simple docstring"""
    __UpperCAmelCase = PerceiverTokenizer
    __UpperCAmelCase = False

    def lowercase_ ( self ):
        super().setUp()
        __snake_case : str = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def lowercase_ ( self ):
        return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )

    def lowercase_ ( self , **_UpperCAmelCase ):
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=20 , _UpperCAmelCase=5 ):
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        __snake_case : List[Any] = []
        for i in range(len(_UpperCAmelCase ) ):
            try:
                __snake_case : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_UpperCAmelCase )
            except UnicodeDecodeError:
                pass
            toks.append((i, tok) )

        __snake_case : List[Any] = list(filter(lambda _UpperCAmelCase : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _UpperCAmelCase ) )
        __snake_case : Dict = list(filter(lambda _UpperCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_UpperCAmelCase ) , _UpperCAmelCase ) )
        if max_length is not None and len(_UpperCAmelCase ) > max_length:
            __snake_case : List[str] = toks[:max_length]
        if min_length is not None and len(_UpperCAmelCase ) < min_length and len(_UpperCAmelCase ) > 0:
            while len(_UpperCAmelCase ) < min_length:
                __snake_case : Optional[int] = toks + toks
        # toks_str = [t[1] for t in toks]
        __snake_case : List[Any] = [t[0] for t in toks]

        # Ensure consistency
        __snake_case : Optional[Any] = tokenizer.decode(_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase )
        if " " not in output_txt and len(_UpperCAmelCase ) > 1:
            __snake_case : List[str] = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_UpperCAmelCase )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_UpperCAmelCase )
            )
        if with_prefix_space:
            __snake_case : List[Any] = ' ' + output_txt

        __snake_case : Optional[int] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
        return output_txt, output_ids

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.perceiver_tokenizer
        __snake_case : Dict = 'Unicode €.'
        __snake_case : Union[str, Any] = tokenizer(_UpperCAmelCase )
        __snake_case : Dict = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )

        # decoding
        __snake_case : int = tokenizer.decode(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , '[CLS]Unicode €.[SEP]' )

        __snake_case : Optional[Any] = tokenizer('e è é ê ë' )
        __snake_case : Dict = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded['input_ids'] , _UpperCAmelCase )
        # decoding
        __snake_case : str = tokenizer.decode(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , '[CLS]e è é ê ë[SEP]' )

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , '[CLS]e è é ê ë[SEP]' )

    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = self.perceiver_tokenizer
        __snake_case : Union[str, Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        __snake_case : str = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        __snake_case : Dict = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )

        if FRAMEWORK != "jax":
            __snake_case : List[str] = list(batch.input_ids.numpy()[0] )
        else:
            __snake_case : List[Any] = list(batch.input_ids.tolist()[0] )

        self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )

        self.assertEqual((2, 38) , batch.input_ids.shape )
        self.assertEqual((2, 38) , batch.attention_mask.shape )

    def lowercase_ ( self ):
        __snake_case : Dict = self.perceiver_tokenizer
        __snake_case : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        __snake_case : str = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , _UpperCAmelCase )
        self.assertIn('attention_mask' , _UpperCAmelCase )
        self.assertNotIn('decoder_input_ids' , _UpperCAmelCase )
        self.assertNotIn('decoder_attention_mask' , _UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : List[str] = self.perceiver_tokenizer
        __snake_case : Tuple = [
            'Summary of the text.',
            'Another summary.',
        ]
        __snake_case : int = tokenizer(
            text_target=_UpperCAmelCase , max_length=32 , padding='max_length' , truncation=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        self.assertEqual(32 , targets['input_ids'].shape[1] )

    def lowercase_ ( self ):
        # safety check on max_len default value so we are sure the test works
        __snake_case : Union[str, Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )

        # Now let's start the test
        __snake_case : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Optional[Any] = ' He is very happy, UNwant\u00E9d,running'
                __snake_case : Tuple = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                tokenizer.save_pretrained(_UpperCAmelCase )

                __snake_case : str = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
                __snake_case : List[str] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
                shutil.rmtree(_UpperCAmelCase )

        __snake_case : Dict = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                # Isolate this from the other tests because we save additional tokens/etc
                __snake_case : Tuple = tempfile.mkdtemp()
                __snake_case : Optional[int] = ' He is very happy, UNwant\u00E9d,running'

                tokenizer.add_tokens(['bim', 'bambam'] )
                __snake_case : Optional[int] = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                __snake_case : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                tokenizer.save_pretrained(_UpperCAmelCase )

                __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase )
                __snake_case : Optional[Any] = after_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
                self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )

                __snake_case : List[Any] = tokenizer.__class__.from_pretrained(_UpperCAmelCase , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(_UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : Tuple = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(_UpperCAmelCase )

                with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    __snake_case : Any = json.load(_UpperCAmelCase )

                with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    __snake_case : List[str] = json.load(_UpperCAmelCase )

                __snake_case : List[str] = [F"""<extra_id_{i}>""" for i in range(125 )]

                __snake_case : Dict = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                __snake_case : List[Any] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(_UpperCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(_UpperCAmelCase , _UpperCAmelCase )
                with open(os.path.join(_UpperCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(_UpperCAmelCase , _UpperCAmelCase )

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                __snake_case : Optional[Any] = tokenizer_class.from_pretrained(
                    _UpperCAmelCase , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                self.assertEqual(
                    ['an_additional_special_token'] ,
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                __snake_case : Any = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_UpperCAmelCase )]
                __snake_case : str = tokenizer_class.from_pretrained(
                    _UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , )

                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] ,
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )

    def lowercase_ ( self ):
        __snake_case : Tuple = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178] ) , '�' )

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        __snake_case : Optional[Any] = self.get_tokenizers(fast=_UpperCAmelCase , do_lower_case=_UpperCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
                __snake_case : Union[str, Any] = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
                __snake_case : Tuple = tokenizer.convert_tokens_to_string(_UpperCAmelCase )
                self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
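A standalone sketch of the behavior the tests above exercise: the Perceiver tokenizer operates on raw UTF-8 bytes, so arbitrary strings round-trip through encode/decode (checkpoint name as used in the tests):

from transformers import PerceiverTokenizer

tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
ids = tok("Unicode €.").input_ids
print(tok.decode(ids))  # expected per the tests above: '[CLS]Unicode €.[SEP]'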
679
1
import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch,
    require_torch_gpu,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
    from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor

class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ):
        __snake_case : List[Any] = parent
        __snake_case : List[Any] = batch_size
        __snake_case : Dict = image_size
        __snake_case : int = patch_size
        __snake_case : Optional[Any] = num_channels
        __snake_case : Union[str, Any] = is_training
        __snake_case : int = use_labels
        __snake_case : Any = hidden_size
        __snake_case : Union[str, Any] = num_hidden_layers
        __snake_case : Dict = num_attention_heads
        __snake_case : Dict = intermediate_size
        __snake_case : Dict = hidden_act
        __snake_case : List[str] = hidden_dropout_prob
        __snake_case : List[Any] = attention_probs_dropout_prob
        __snake_case : List[str] = type_sequence_label_size
        __snake_case : Optional[int] = initializer_range
        __snake_case : Optional[Any] = scope
        __snake_case : List[str] = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        __snake_case : int = (image_size // patch_size) ** 2
        __snake_case : Union[str, Any] = num_patches + 1

    def lowercase_ ( self ):
        __snake_case : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __snake_case : str = None
        if self.use_labels:
            __snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        __snake_case : Tuple = self.get_config()

        return config, pixel_values, labels

    def lowercase_ ( self ):
        return ViTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            is_decoder=_UpperCAmelCase ,
            initializer_range=self.initializer_range ,
            encoder_stride=self.encoder_stride , )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : int = ViTModel(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Union[str, Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : Tuple = ViTForMaskedImageModeling(config=_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : Optional[int] = model(_UpperCAmelCase )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        __snake_case : str = 1
        __snake_case : List[Any] = ViTForMaskedImageModeling(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()

        __snake_case : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case : List[Any] = model(_UpperCAmelCase )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
        __snake_case : Optional[int] = self.type_sequence_label_size
        __snake_case : Optional[int] = ViTForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()
        __snake_case : int = model(_UpperCAmelCase , labels=_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        __snake_case : List[Any] = 1
        __snake_case : Optional[Any] = ViTForImageClassification(_UpperCAmelCase )
        model.to(_UpperCAmelCase )
        model.eval()

        __snake_case : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case : int = model(_UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.prepare_config_and_inputs()
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : str = config_and_inputs
        __snake_case : Union[str, Any] = {'pixel_values': pixel_values}
        return config, inputs_dict

@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
    """simple docstring"""
    __UpperCAmelCase = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase = True

    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False

    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = ViTModelTester(self )
        __snake_case : Tuple = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )

    def lowercase_ ( self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def lowercase_ ( self ):
        pass

    def lowercase_ ( self ):
        __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __snake_case : Dict = model_class(_UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __snake_case : List[str] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )

    def lowercase_ ( self ):
        __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            __snake_case : List[str] = model_class(_UpperCAmelCase )
            __snake_case : Optional[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __snake_case : List[str] = [*signature.parameters.keys()]

            __snake_case : Optional[int] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , _UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )

    def lowercase_ ( self ):
        __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )

    @slow
    def lowercase_ ( self ):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Dict = ViTModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )

def UpperCAmelCase__( ):
    __snake_case : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image

@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
    """simple docstring"""
    @cached_property
    def lowercase_ ( self ):
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def lowercase_ ( self ):
        __snake_case : List[str] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(_UpperCAmelCase )

        __snake_case : int = self.default_image_processor
        __snake_case : str = prepare_img()
        __snake_case : int = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase )

        # forward pass
        with torch.no_grad():
            __snake_case : Optional[Any] = model(**_UpperCAmelCase )

        # verify the logits
        __snake_case : Dict = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , _UpperCAmelCase )

        __snake_case : Tuple = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_UpperCAmelCase )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )

    @slow
    def lowercase_ ( self ):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        __snake_case : Any = ViTModel.from_pretrained('facebook/dino-vits8' ).to(_UpperCAmelCase )

        __snake_case : Optional[Any] = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
        __snake_case : Optional[int] = prepare_img()
        __snake_case : Union[str, Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
        __snake_case : Tuple = inputs.pixel_values.to(_UpperCAmelCase )

        # forward pass
        with torch.no_grad():
            __snake_case : str = model(_UpperCAmelCase , interpolate_pos_encoding=_UpperCAmelCase )

        # verify the logits
        __snake_case : Optional[Any] = torch.Size((1, 3_601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape , _UpperCAmelCase )

        __snake_case : Union[str, Any] = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_UpperCAmelCase )

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def lowercase_ ( self ):
        __snake_case : Any = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.float16 , device_map='auto' )
        __snake_case : List[str] = self.default_image_processor

        __snake_case : int = prepare_img()

        __snake_case : Optional[Any] = image_processor(images=_UpperCAmelCase , return_tensors='pt' )
        __snake_case : int = inputs.pixel_values.to(_UpperCAmelCase )

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            __snake_case : Union[str, Any] = model(_UpperCAmelCase )
679
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType

@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = 42
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default="Translation" , init=UpperCamelCase , repr=UpperCamelCase)

    def __call__( self ):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )

    def lowercase_ ( self ):
        from .features import Value

        return {k: Value('string' ) for k in sorted(self.languages )}

@dataclass
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    __UpperCAmelCase = None
    __UpperCAmelCase = None
    __UpperCAmelCase = None
    # Automatically constructed
    __UpperCAmelCase = "dict"
    __UpperCAmelCase = None
    __UpperCAmelCase = field(default="TranslationVariableLanguages" , init=UpperCamelCase , repr=UpperCamelCase)

    def lowercase_ ( self ):
        __snake_case : List[str] = sorted(set(self.languages ) ) if self.languages else None
        __snake_case : Optional[Any] = len(self.languages ) if self.languages else None

    def __call__( self ):
        return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} )

    def lowercase_ ( self , _UpperCAmelCase ):
        __snake_case : Optional[int] = set(self.languages )
        if self.languages and set(_UpperCAmelCase ) - lang_set:
            raise ValueError(
                F"""Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).""" )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        __snake_case : Any = []
        for lang, text in translation_dict.items():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                translation_tuples.append((lang, text) )
            else:
                translation_tuples.extend([(lang, el) for el in text] )

        # Ensure translations are in ascending order by language code.
        __snake_case , __snake_case : Any = zip(*sorted(_UpperCAmelCase ) )

        return {"language": languages, "translation": translations}

    def lowercase_ ( self ):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('string' ) ),
            "translation": Sequence(Value('string' ) ),
        }
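A minimal sketch using the public `datasets` API that these feature classes implement; the column names and sentence pair below are illustrative:

from datasets import Dataset, Features, Translation, Value

features = Features({"id": Value("string"), "translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"id": ["0"], "translation": [{"en": "the cat", "fr": "le chat"}]},
    features=features,
)
print(ds[0]["translation"])  # {'en': 'the cat', 'fr': 'le chat'}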
679
1
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

__magic_name__ = logging.get_logger(__name__)

__magic_name__ = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}

__magic_name__ = {
    '''vocab_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
    },
    '''merges_file''': {
        '''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
    },
    '''tokenizer_config_file''': {
        '''facebook/blenderbot_small-90M''': (
            '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
        )
    },
}

__magic_name__ = {
    '''facebook/blenderbot_small-90M''': 512,
}

class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
    """simple docstring"""
    __UpperCAmelCase = VOCAB_FILES_NAMES
    __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase = BlenderbotSmallTokenizer

    def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase="<|endoftext|>" , _UpperCAmelCase=False , _UpperCAmelCase=True , **_UpperCAmelCase , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=_UpperCAmelCase ,
                merges=_UpperCAmelCase ,
                add_prefix_space=_UpperCAmelCase ,
                trim_offsets=_UpperCAmelCase , ) ,
            bos_token=_UpperCAmelCase ,
            eos_token=_UpperCAmelCase ,
            unk_token=_UpperCAmelCase ,
            **_UpperCAmelCase , )
        __snake_case : List[Any] = add_prefix_space

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
        __snake_case : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]

    def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
        __snake_case : Union[str, Any] = [self.sep_token_id]
        __snake_case : Any = [self.cls_token_id]

        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
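A minimal usage sketch, assuming the class above is transformers' BlenderbotSmallTokenizerFast and using the checkpoint from the maps above:

from transformers import BlenderbotSmallTokenizerFast

tok = BlenderbotSmallTokenizerFast.from_pretrained("facebook/blenderbot_small-90M")
print(tok("sam is a great name").input_ids)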
679
from __future__ import annotations

__magic_name__ = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]

def UpperCAmelCase__( __UpperCAmelCase : list[list[int]] , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int , __UpperCAmelCase : list[list[int]] , ):
    __snake_case : Optional[int] = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
    ]  # the reference grid
    __snake_case : List[str] = 1
    __snake_case : str = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(__UpperCAmelCase ) )
    ]  # the action grid

    __snake_case : Dict = init[0]
    __snake_case : List[str] = init[1]
    __snake_case : Optional[Any] = 0
    __snake_case : Union[str, Any] = g + heuristic[x][y]  # cost from starting cell to destination cell
    __snake_case : Any = [[f, g, x, y]]

    __snake_case : List[str] = False  # flag that is set when search is complete
    __snake_case : str = False  # flag set if we can't find expand

    while not found and not resign:
        if len(__UpperCAmelCase ) == 0:
            raise ValueError('Algorithm is unable to find solution' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            __snake_case : List[Any] = cell.pop()
            __snake_case : Optional[int] = next_cell[2]
            __snake_case : int = next_cell[3]
            __snake_case : Optional[Any] = next_cell[1]

            if x == goal[0] and y == goal[1]:
                __snake_case : Union[str, Any] = True
            else:
                for i in range(len(__UpperCAmelCase ) ):  # to try out different valid actions
                    __snake_case : Tuple = x + DIRECTIONS[i][0]
                    __snake_case : Tuple = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(__UpperCAmelCase ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            __snake_case : List[str] = g + cost
                            __snake_case : Optional[Any] = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            __snake_case : Dict = 1
                            __snake_case : Any = i

    __snake_case : Tuple = []
    __snake_case : Dict = goal[0]
    __snake_case : Optional[int] = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        __snake_case : Tuple = x - DIRECTIONS[action[x][y]][0]
        __snake_case : Optional[Any] = y - DIRECTIONS[action[x][y]][1]
        __snake_case : Tuple = xa
        __snake_case : List[str] = ya
        invpath.append([x, y] )

    __snake_case : Dict = []
    for i in range(len(__UpperCAmelCase ) ):
        path.append(invpath[len(__UpperCAmelCase ) - 1 - i] )
    return path, action

if __name__ == "__main__":
    __magic_name__ = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    __magic_name__ = [0, 0]
    # all coordinates are given in format [y,x]
    __magic_name__ = [len(grid) - 1, len(grid[0]) - 1]
    __magic_name__ = 1

    # the cost map which pushes the path closer to the goal
    __magic_name__ = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            __magic_name__ = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                __magic_name__ = 99

    __magic_name__ , __magic_name__ = search(grid, init, goal, cost, heuristic)

    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
679
1