Dataset schema:

column                    type     values
code                      string   lengths 87 to 55.2k
code_codestyle            int64    0 to 349
style_context             string   lengths 135 to 49.1k
style_context_codestyle   int64    0 to 349
label                     int64    0 to 1
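Each row below pairs a flattened code string with a style-context string, an integer style id for each, and a binary label. As a minimal sketch of how rows with this schema could be read (the hub id below is a placeholder, not the dataset's real name):

```python
# Hedged sketch: iterate a few rows of a dataset with this schema.
# "user/codestyle-pairs" is a hypothetical identifier for illustration.
from datasets import load_dataset

ds = load_dataset("user/codestyle-pairs", split="train")  # hypothetical id

for row in ds.select(range(3)):  # peek at the first three rows
    # Each row: a code sample, its style id in [0, 349], a style-context
    # string, the context's style id, and a binary label.
    print(
        len(row["code"]),
        row["code_codestyle"],
        len(row["style_context"]),
        row["style_context_codestyle"],
        row["label"],
    )
```

Row 1

code: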
"""simple docstring""" import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow a = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ '''text-classification''', '''language-modeling''', '''summarization''', '''token-classification''', '''question-answering''', ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) a = logging.getLogger() def _snake_case ( ) -> List[Any]: '''simple docstring''' _A = argparse.ArgumentParser() parser.add_argument('-f' ) _A = parser.parse_args() return args.f def _snake_case ( _snake_case : int , _snake_case : Tuple="eval" ) -> Optional[Any]: '''simple docstring''' _A = os.path.join(_snake_case , F'''{split}_results.json''' ) if os.path.exists(_snake_case ): with open(_snake_case , 'r' ) as f: return json.load(_snake_case ) raise ValueError(F'''can\'t find {path}''' ) a = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = self.get_auto_remove_tmp_dir() _A = F''' run_glue.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_flax_glue.main() _A = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) @slow def lowerCAmelCase_ ( self : Tuple ): _A = self.get_auto_remove_tmp_dir() _A = F''' run_clm_flax.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_clm_flax.main() _A = get_results(_UpperCAmelCase ) self.assertLess(result['eval_perplexity'] , 100 ) @slow def lowerCAmelCase_ ( self : List[str] ): _A = self.get_auto_remove_tmp_dir() _A = F''' run_summarization.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_summarization_flax.main() _A = get_results(_UpperCAmelCase , split='test' ) self.assertGreaterEqual(result['test_rouge1'] , 10 ) self.assertGreaterEqual(result['test_rouge2'] , 2 ) self.assertGreaterEqual(result['test_rougeL'] , 7 ) self.assertGreaterEqual(result['test_rougeLsum'] , 7 ) @slow def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.get_auto_remove_tmp_dir() _A = F''' run_mlm.py --model_name_or_path 
distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_mlm_flax.main() _A = get_results(_UpperCAmelCase ) self.assertLess(result['eval_perplexity'] , 42 ) @slow def lowerCAmelCase_ ( self : int ): _A = self.get_auto_remove_tmp_dir() _A = F''' run_t5_mlm_flax.py --model_name_or_path t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_ta_mlm_flax.main() _A = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result['eval_accuracy'] , 0.42 ) @slow def lowerCAmelCase_ ( self : Tuple ): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu _A = 7 if get_gpu_count() > 1 else 2 _A = self.get_auto_remove_tmp_dir() _A = F''' run_flax_ner.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_flax_ner.main() _A = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result['eval_accuracy'] , 0.75 ) self.assertGreaterEqual(result['eval_f1'] , 0.3 ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.get_auto_remove_tmp_dir() _A = F''' run_qa.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 '''.split() with patch.object(_UpperCAmelCase , 'argv' , _UpperCAmelCase ): run_qa.main() _A = get_results(_UpperCAmelCase ) self.assertGreaterEqual(result['eval_f1'] , 30 ) self.assertGreaterEqual(result['eval_exact'] , 30 )
code_codestyle: 315

style_context:
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
style_context_codestyle: 315
label: 1

Row 2

code:
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) a = { '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''', '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''', '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''', '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''', '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''', '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''', '''mask_downscaling.0''': '''mask_embed.conv1''', '''mask_downscaling.1''': '''mask_embed.layer_norm1''', '''mask_downscaling.3''': '''mask_embed.conv2''', '''mask_downscaling.4''': '''mask_embed.layer_norm2''', '''mask_downscaling.6''': '''mask_embed.conv3''', '''point_embeddings''': '''point_embed''', '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''', '''image_encoder''': '''vision_encoder''', '''neck.0''': '''neck.conv1''', '''neck.1''': '''neck.layer_norm1''', '''neck.2''': '''neck.conv2''', '''neck.3''': '''neck.layer_norm2''', '''patch_embed.proj''': '''patch_embed.projection''', '''.norm''': '''.layer_norm''', '''blocks''': '''layers''', } def _snake_case ( _snake_case : Any ) -> Optional[Any]: '''simple docstring''' _A = {} state_dict.pop('pixel_mean' , _snake_case ) state_dict.pop('pixel_std' , _snake_case ) _A = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _A = key.replace(_snake_case , _snake_case ) if re.match(_snake_case , _snake_case ): _A = int(re.match(_snake_case , _snake_case ).group(2 ) ) if layer_nb == 0: _A = key.replace('layers.0' , 'proj_in' ) elif layer_nb == 1: _A = key.replace('layers.1' , 'layers.0' ) elif layer_nb == 2: _A = key.replace('layers.2' , 'proj_out' ) _A = value _A = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def _snake_case ( _snake_case : Any , _snake_case : Dict , _snake_case : List[str] , _snake_case : str="ybelkada/segment-anything" ) -> str: '''simple docstring''' _A = hf_hub_download(_snake_case , F'''checkpoints/{model_name}.pth''' ) if "sam_vit_b" in model_name: _A = SamConfig() elif "sam_vit_l" in model_name: _A = SamVisionConfig( hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) _A = SamConfig( vision_config=_snake_case , ) elif "sam_vit_h" in model_name: _A = SamVisionConfig( hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) _A = SamConfig( vision_config=_snake_case , ) _A = torch.load(_snake_case , map_location='cpu' ) _A = replace_keys(_snake_case ) _A = SamImageProcessor() _A = SamProcessor(image_processor=_snake_case ) _A = SamModel(_snake_case ) hf_model.load_state_dict(_snake_case ) _A = hf_model.to('cuda' ) _A = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' _A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' ) _A = [[[4_00, 6_50]]] _A = [[1]] _A = processor(images=np.array(_snake_case ) , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_snake_case ) _A = output.iou_scores.squeeze() if model_name == 
"sam_vit_h_4b8939": assert scores[-1].item() == 0.579890251159668 _A = processor( images=np.array(_snake_case ) , input_points=_snake_case , input_labels=_snake_case , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_snake_case ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 _A = ((75, 2_75, 17_25, 8_50),) _A = processor(images=np.array(_snake_case ) , input_boxes=_snake_case , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_snake_case ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. _A = [[[4_00, 6_50], [8_00, 6_50]]] _A = [[1, 1]] _A = processor( images=np.array(_snake_case ) , input_points=_snake_case , input_labels=_snake_case , return_tensors='pt' ).to('cuda' ) with torch.no_grad(): _A = hf_model(**_snake_case ) _A = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": a = argparse.ArgumentParser() a = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195'''] parser.add_argument( '''--model_name''', default='''sam_vit_h_4b8939''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) parser.add_argument( '''--model_hub_id''', default='''ybelkada/segment-anything''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) a = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
code_codestyle: 315

style_context:
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
style_context_codestyle: 315
label: 1

Row 3

code:
"""simple docstring""" import os import sys import unittest a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) a = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') a = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] ): _A = get_test_to_tester_mapping(_UpperCAmelCase ) _A = get_test_to_tester_mapping(_UpperCAmelCase ) _A = {'BertModelTest': 'BertModelTester'} _A = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = get_model_to_test_mapping(_UpperCAmelCase ) _A = get_model_to_test_mapping(_UpperCAmelCase ) _A = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } _A = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Any ): _A = get_model_to_tester_mapping(_UpperCAmelCase ) _A = get_model_to_tester_mapping(_UpperCAmelCase ) _A = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } _A = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
code_codestyle: 315

style_context:
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
style_context_codestyle: 315
label: 1

Row 4

code:
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''', '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''', '''kssteven/ibert-roberta-large-mnli''': ( '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json''' ), } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[str] = '''ibert''' def __init__( self : Dict , _UpperCAmelCase : List[str]=30_522 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : str=3_072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : List[str]=512 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : str=1E-1_2 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Tuple=0 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Dict="absolute" , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Optional[Any]="none" , **_UpperCAmelCase : List[Any] , ): super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = quant_mode _A = force_dequant class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self : int ): if self.task == "multiple-choice": _A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
code_codestyle: 315

style_context:
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
style_context_codestyle: 315
label: 1

Row 5

code:
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class lowercase_ : '''simple docstring''' UpperCAmelCase : int = MBartConfig UpperCAmelCase : Any = {} UpperCAmelCase : Tuple = '''gelu''' def __init__( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : int=99 , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : Dict=2 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : Union[str, Any]=37 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[str]=20 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Optional[int]=1 , _UpperCAmelCase : Any=0 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase_ ( self : Tuple ): _A = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A = tf.concat([input_ids, eos_tensor] , axis=1 ) _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A = prepare_mbart_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ): _A = TFMBartModel(config=_UpperCAmelCase ).get_decoder() _A = inputs_dict['input_ids'] _A = input_ids[:1, :] _A = inputs_dict['attention_mask'][:1, :] _A = inputs_dict['head_mask'] _A = 1 # first forward pass _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) _A , _A = outputs.to_tuple() _A = past_key_values[1] def _snake_case ( _snake_case : str , _snake_case : List[str] , _snake_case : str , _snake_case : Dict=None , _snake_case : Optional[int]=None , _snake_case : List[str]=None , _snake_case : str=None , _snake_case : Optional[int]=None , ) -> Tuple: '''simple docstring''' if attention_mask is None: _A = tf.cast(tf.math.not_equal(_snake_case , config.pad_token_id ) , tf.inta ) if 
decoder_attention_mask is None: _A = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () UpperCAmelCase : Union[str, Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else () UpperCAmelCase : Tuple = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) UpperCAmelCase : Optional[int] = True UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : Union[str, Any] = False def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] ): if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def lowerCAmelCase_ ( self : str ): _A = TFMBartModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Dict ): _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_tf class lowercase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Optional[int] = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] UpperCAmelCase : str = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] UpperCAmelCase : Dict = '''facebook/mbart-large-en-ro''' @cached_property def lowerCAmelCase_ ( self : List[str] ): return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase_ ( self : List[str] ): _A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : Any ): _A = self.translate_src_text(**_UpperCAmelCase ) self.assertListEqual(self.expected_text , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , **_UpperCAmelCase : List[Any] ): _A = self.tokenizer(self.src_text , **_UpperCAmelCase , return_tensors='tf' ) _A = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) _A = self.tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) return generated_words @slow def lowerCAmelCase_ ( self : Any ): self._assert_generated_batch_equal_expected()
code_codestyle: 315

style_context:
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
style_context_codestyle: 315
label: 1

Row 6

code:
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''', '''Salesforce/blip-vqa-capfit-large''': ( '''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-base''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json''' ), '''Salesforce/blip-image-captioning-large''': ( '''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json''' ), '''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''', '''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''', '''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''', '''Salesforce/blip-itm-large-flikr''': ( '''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json''' ), } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = '''blip_text_model''' def __init__( self : int , _UpperCAmelCase : Optional[int]=30_524 , _UpperCAmelCase : List[str]=768 , _UpperCAmelCase : int=768 , _UpperCAmelCase : Tuple=3_072 , _UpperCAmelCase : Optional[Any]=768 , _UpperCAmelCase : List[Any]=12 , _UpperCAmelCase : List[Any]=8 , _UpperCAmelCase : List[Any]=512 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : int=1E-1_2 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : Optional[int]=0.0 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[Any]=30_522 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Dict=102 , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=True , **_UpperCAmelCase : List[str] , ): super().__init__( pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , sep_token_id=_UpperCAmelCase , **_UpperCAmelCase , ) _A = vocab_size _A = hidden_size _A = encoder_hidden_size _A = intermediate_size _A = projection_dim _A = hidden_dropout_prob _A = num_hidden_layers _A = num_attention_heads _A = max_position_embeddings _A = layer_norm_eps _A = hidden_act _A = initializer_range _A = attention_probs_dropout_prob _A = is_decoder _A = use_cache @classmethod def lowerCAmelCase_ ( cls : Any , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Any ): cls._set_token_in_kwargs(_UpperCAmelCase ) _A , _A = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) # get the text config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": _A = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = '''blip_vision_model''' def __init__( self : List[str] , _UpperCAmelCase : Dict=768 , _UpperCAmelCase : Any=3_072 , _UpperCAmelCase : int=512 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : int=384 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Optional[Any]=1E-5 , _UpperCAmelCase : Dict=0.0 , _UpperCAmelCase : List[Any]=1E-1_0 , **_UpperCAmelCase : int , ): super().__init__(**_UpperCAmelCase ) _A = hidden_size _A = intermediate_size _A = projection_dim _A = num_hidden_layers _A = num_attention_heads _A = patch_size _A = image_size _A = initializer_range _A = attention_dropout _A = layer_norm_eps _A = hidden_act @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Dict ): cls._set_token_in_kwargs(_UpperCAmelCase ) _A , _A = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('model_type' ) == "blip": _A = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = '''blip''' UpperCAmelCase : Optional[Any] = True def __init__( self : Dict , _UpperCAmelCase : Any=None , _UpperCAmelCase : int=None , _UpperCAmelCase : int=512 , _UpperCAmelCase : List[Any]=2.6592 , _UpperCAmelCase : Any=256 , **_UpperCAmelCase : Dict , ): super().__init__(**_UpperCAmelCase ) if text_config is None: _A = {} logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' ) if vision_config is None: _A = {} logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' ) _A = BlipTextConfig(**_UpperCAmelCase ) _A = BlipVisionConfig(**_UpperCAmelCase ) _A = self.vision_config.hidden_size _A = projection_dim _A = logit_scale_init_value _A = 1.0 _A = 0.02 _A = image_text_hidden_size @classmethod def lowerCAmelCase_ ( cls : Union[str, Any] , _UpperCAmelCase : BlipTextConfig , _UpperCAmelCase : BlipVisionConfig , **_UpperCAmelCase : int ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = copy.deepcopy(self.__dict__ ) _A = self.text_config.to_dict() _A = self.vision_config.to_dict() _A = self.__class__.model_type return output
code_codestyle: 315

style_context:
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
"""simple docstring""" def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' _A = [[] for _ in range(_snake_case )] _A = key - 1 if key <= 0: raise ValueError('Height of grid can\'t be 0 or negative' ) if key == 1 or len(_snake_case ) <= key: return input_string for position, character in enumerate(_snake_case ): _A = position % (lowest * 2) # puts it in bounds _A = min(_snake_case , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(_snake_case ) _A = [''.join(_snake_case ) for row in temp_grid] _A = ''.join(_snake_case ) return output_string def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' _A = [] _A = key - 1 if key <= 0: raise ValueError('Height of grid can\'t be 0 or negative' ) if key == 1: return input_string _A = [[] for _ in range(_snake_case )] # generates template for position in range(len(_snake_case ) ): _A = position % (lowest * 2) # puts it in bounds _A = min(_snake_case , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append('*' ) _A = 0 for row in temp_grid: # fills in the characters _A = input_string[counter : counter + len(_snake_case )] grid.append(list(_snake_case ) ) counter += len(_snake_case ) _A = '' # reads as zigzag for position in range(len(_snake_case ) ): _A = position % (lowest * 2) # puts it in bounds _A = min(_snake_case , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _snake_case ( _snake_case : str ) -> dict[int, str]: '''simple docstring''' _A = {} for key_guess in range(1 , len(_snake_case ) ): # tries every key _A = decrypt(_snake_case , _snake_case ) return results if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
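# A minimal sketch of the default-quantizer setup performed above, assuming
# pytorch_quantization is installed. This mirrors the "percentile"/"mse" branch
# (histogram activation calibrator, 8-bit activations, per-channel 8-bit weights);
# the calls are the same ones used in the configuration function above.
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

input_desc = QuantDescriptor(num_bits=8, calib_method="histogram")  # activations
weight_desc = QuantDescriptor(num_bits=8, axis=(0,))  # per-channel weights
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)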
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def _snake_case ( _snake_case : str , _snake_case : str , **_snake_case : Dict ) -> Union[str, Any]: '''simple docstring''' _A = AutoConfig.from_pretrained(_snake_case , **_snake_case ) _A = AutoModelForSeqaSeqLM.from_config(_snake_case ) model.save_pretrained(_snake_case ) AutoTokenizer.from_pretrained(_snake_case ).save_pretrained(_snake_case ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
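# The metric is a thin wrapper over scipy; a minimal equivalent check of
# Examples 1 and 2 from the docstring above:
from scipy.stats import spearmanr as scipy_spearmanr

rho, pvalue = scipy_spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(rho)               # -0.7
print(round(pvalue, 2))  # 0.19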
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[str] = '''lxmert''' UpperCAmelCase : Tuple = {} def __init__( self : List[Any] , _UpperCAmelCase : int=30_522 , _UpperCAmelCase : int=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : int=9_500 , _UpperCAmelCase : Tuple=1_600 , _UpperCAmelCase : int=400 , _UpperCAmelCase : Optional[int]=3_072 , _UpperCAmelCase : Dict="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=9 , _UpperCAmelCase : Tuple=5 , _UpperCAmelCase : List[Any]=5 , _UpperCAmelCase : Union[str, Any]=2_048 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Tuple=6.67 , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : List[Any]=True , **_UpperCAmelCase : Optional[int] , ): _A = vocab_size _A = hidden_size _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = num_qa_labels _A = num_object_labels _A = num_attr_labels _A = l_layers _A = x_layers _A = r_layers _A = visual_feat_dim _A = visual_pos_dim _A = visual_loss_normalizer _A = task_matched _A = task_mask_lm _A = task_obj_predict _A = task_qa _A = visual_obj_loss _A = visual_attr_loss _A = visual_feat_loss _A = {'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers} super().__init__(**_UpperCAmelCase )
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) a = _symbol_database.Default() a = _descriptor_pool.Default().AddSerializedFile( B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03''' ) a = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals) if _descriptor._USE_C_DESCRIPTORS is False: a = None a = B'''H\003''' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" a = 45 a = 1_581 a = 1_517 a = 1_570 a = 1_584 a = 1_793 a = 1_795 a = 1_916 a = 1_864 a = 1_905 a = 1_919 a = 2_429 a = 2_208 a = 2_418 a = 2_323 a = 2_407 # @@protoc_insertion_point(module_scope)
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
"""simple docstring""" import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging a = logging.get_logger(__name__) def _snake_case ( _snake_case : Dict , _snake_case : int ) -> List[str]: '''simple docstring''' _A = nn.functional.normalize(_snake_case ) _A = nn.functional.normalize(_snake_case ) return torch.mm(_snake_case , normalized_text_embeds.t() ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = CLIPConfig UpperCAmelCase : str = ['''CLIPEncoderLayer'''] def __init__( self : Optional[int] , _UpperCAmelCase : CLIPConfig ): super().__init__(_UpperCAmelCase ) _A = CLIPVisionModel(config.vision_config ) _A = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase ) _A = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) _A = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) _A = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase ) _A = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase ) @torch.no_grad() def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Any] ): _A = self.vision_model(_UpperCAmelCase )[1] # pooled_output _A = self.visual_projection(_UpperCAmelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _A = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy() _A = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy() _A = [] _A = image_embeds.shape[0] for i in range(_UpperCAmelCase ): _A = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images _A = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): _A = special_cos_dist[i][concept_idx] _A = self.special_care_embeds_weights[concept_idx].item() _A = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) _A = 0.01 for concept_idx in range(len(cos_dist[0] ) ): _A = cos_dist[i][concept_idx] _A = self.concept_embeds_weights[concept_idx].item() _A = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(_UpperCAmelCase ) result.append(_UpperCAmelCase ) _A = [len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : torch.FloatTensor ): _A = self.vision_model(_UpperCAmelCase )[1] # pooled_output _A = self.visual_projection(_UpperCAmelCase ) _A = cosine_distance(_UpperCAmelCase , self.special_care_embeds ) _A = cosine_distance(_UpperCAmelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images _A = 0.0 _A = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) _A = torch.any(special_scores > 0 , dim=1 ) _A = special_care * 0.01 _A = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) _A = (cos_dist - self.concept_embeds_weights) + special_adjustment # 
concept_scores = concept_scores.round(decimals=3) _A = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
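# A shape-level sketch of the cosine_distance helper above on random embeddings.
# The embedding width (768) is an illustrative assumption; the checker itself uses
# config.projection_dim, with 17 concept and 3 special-care embedding rows.
import torch
import torch.nn as nn

image_embeds = torch.randn(2, 768)
concept_embeds = torch.randn(17, 768)
similarity = torch.mm(
    nn.functional.normalize(image_embeds), nn.functional.normalize(concept_embeds).t()
)
print(similarity.shape)  # torch.Size([2, 17]): one score per image/concept pair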
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
"""simple docstring""" from __future__ import annotations from typing import Any def _snake_case ( _snake_case : list ) -> int: '''simple docstring''' if not postfix_notation: return 0 _A = {'+', '-', '*', '/'} _A = [] for token in postfix_notation: if token in operations: _A , _A = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(_snake_case ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : str ) -> list[int]: '''simple docstring''' return [ord(_snake_case ) - 96 for elem in plain] def _snake_case ( _snake_case : list[int] ) -> str: '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def _snake_case ( ) -> None: '''simple docstring''' _A = encode(input('-> ' ).strip().lower() ) print('Encoded: ' , _snake_case ) print('Decoded:' , decode(_snake_case ) ) if __name__ == "__main__": main()
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
"""simple docstring""" def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = abs(_snake_case ) _A = 0 while n > 0: res += n % 10 n //= 10 return res def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = abs(_snake_case ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' return sum(int(_snake_case ) for c in str(abs(_snake_case ) ) ) def _snake_case ( ) -> None: '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(_snake_case : Callable , _snake_case : int ) -> None: _A = F'''{func.__name__}({value})''' _A = timeit(F'''__main__.{call}''' , setup='import __main__' ) print(F'''{call:56} = {func(_snake_case )} -- {timing:.4f} seconds''' ) for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(_snake_case , _snake_case ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort a = '''1''' a = '''0''' a = '''1''' a = ort.SessionOptions() a = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print('''Create inference session...''') a = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider'''] a = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider) a = ort.RunOptions() a = 128 a = 1 a = np.ones((batch, sequence), dtype=np.intaa) a = np.ones((batch, sequence), dtype=np.intaa) a = np.ones((batch, sequence), dtype=np.intaa) print('''Warm up phase...''') sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('''Start inference...''') a = time.time() a = 2_000 a = {} for iter in range(max_iters): a = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1_000 / max_iters))
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
"""simple docstring""" import functools from typing import Any def _snake_case ( _snake_case : str , _snake_case : list[str] ) -> bool: '''simple docstring''' if not isinstance(_snake_case , _snake_case ) or len(_snake_case ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(_snake_case , _snake_case ) or not all( isinstance(_snake_case , _snake_case ) and len(_snake_case ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie _A = {} _A = 'WORD_KEEPER' for word in words: _A = trie for c in word: if c not in trie_node: _A = {} _A = trie_node[c] _A = True _A = len(_snake_case ) # Dynamic programming method @functools.cache def is_breakable(_snake_case : int ) -> bool: if index == len_string: return True _A = trie for i in range(_snake_case , _snake_case ): _A = trie_node.get(string[i] , _snake_case ) if trie_node is None: return False if trie_node.get(_snake_case , _snake_case ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) a = { '''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig'''] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''ConvNextFeatureExtractor'''] a = ['''ConvNextImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvNextForImageClassification''', '''ConvNextModel''', '''ConvNextPreTrainedModel''', '''ConvNextBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''TFConvNextForImageClassification''', '''TFConvNextModel''', '''TFConvNextPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
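# Example invocation (added sketch, not an official recipe -- every path below
# is a hypothetical placeholder):
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --token_counts data/token_counts.pickle \
#       --data_file data/binarized_text.pickle \
#       --dump_path serialization_dir/my_first_distillation
#
# Note that the sanity checks require --alpha_clm 0.0 whenever --mlm is set,
# since its default of 0.5 would otherwise fail the assertions.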
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
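# Rendering note (added): the class above is a manim Scene (the obfuscated
# base class stands in for `Scene`, and the method for its `construct` entry
# point). Assuming the file is saved as, e.g., big_model_inference.py, it
# could be rendered with:
#
#   manim -pql big_model_inference.py <SceneClassName>
#
# where -pql previews at low quality and -pqh produces the final render.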
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any ): _A = tempfile.mkdtemp() # fmt: off _A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) _A = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } _A = os.path.join(self.tmpdirname , _UpperCAmelCase ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , **_UpperCAmelCase : List[str] ): return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , **_UpperCAmelCase : Tuple ): return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self : List[str] ): _A = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _A = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.get_tokenizer() _A = self.get_image_processor() _A = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _A = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _A = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _A = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 ) _A = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.get_image_processor() _A = self.get_tokenizer() _A = 
VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) _A = self.prepare_image_inputs() _A = image_processor(_UpperCAmelCase , return_tensors='np' ) _A = processor(images=_UpperCAmelCase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase_ ( self : str ): _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) _A = 'lower newer' _A = processor(text=_UpperCAmelCase ) _A = tokenizer(_UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase_ ( self : Tuple ): _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) _A = 'lower newer' _A = self.prepare_image_inputs() _A = processor(text=_UpperCAmelCase , images=_UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(_UpperCAmelCase ): processor() def lowerCAmelCase_ ( self : List[Any] ): _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) _A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _A = processor.batch_decode(_UpperCAmelCase ) _A = tokenizer.batch_decode(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = self.get_image_processor() _A = self.get_tokenizer() _A = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase ) _A = 'lower newer' _A = self.prepare_image_inputs() _A = processor(text=_UpperCAmelCase , images=_UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os def _snake_case ( _snake_case : str = "input.txt" ) -> int: '''simple docstring''' with open(os.path.join(os.path.dirname(_snake_case ) , _snake_case ) ) as input_file: _A = [ [int(_snake_case ) for element in line.split(',' )] for line in input_file.readlines() ] _A = len(_snake_case ) _A = len(matrix[0] ) _A = [[-1 for _ in range(_snake_case )] for _ in range(_snake_case )] for i in range(_snake_case ): _A = matrix[i][0] for j in range(1 , _snake_case ): for i in range(_snake_case ): _A = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , _snake_case ): _A = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): _A = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
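# Minimal usage sketch (added): this class matches transformers'
# SpeechT5FeatureExtractor; everything below assumes the default configuration.
#
#   import numpy as np
#   from transformers import SpeechT5FeatureExtractor
#
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.random.randn(16_000).astype(np.float32)  # 1 s of fake mono audio
#   inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   print(inputs["input_values"].shape)    # raw waveform values for the encoder
#   targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="pt")
#   print(targets["input_values"].shape)   # (1, n_frames, 80) log-mel targets for the decoder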
"""simple docstring""" import os from collections.abc import Iterator def _snake_case ( _snake_case : str = "." ) -> Iterator[str]: '''simple docstring''' for dir_path, dir_names, filenames in os.walk(_snake_case ): _A = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(_snake_case )[1] in (".py", ".ipynb"): yield os.path.join(_snake_case , _snake_case ).lstrip('./' ) def _snake_case ( _snake_case : List[Any] ) -> Tuple: '''simple docstring''' return F'''{i * " "}*''' if i else "\n##" def _snake_case ( _snake_case : str , _snake_case : str ) -> str: '''simple docstring''' _A = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(_snake_case ) or old_parts[i] != new_part) and new_part: print(F'''{md_prefix(_snake_case )} {new_part.replace("_" , " " ).title()}''' ) return new_path def _snake_case ( _snake_case : str = "." ) -> None: '''simple docstring''' _A = '' for filepath in sorted(good_file_paths(_snake_case ) ): _A , _A = os.path.split(_snake_case ) if filepath != old_path: _A = print_path(_snake_case , _snake_case ) _A = (filepath.count(os.sep ) + 1) if filepath else 0 _A = F'''{filepath}/{filename}'''.replace(' ' , '%20' ) _A = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'''{md_prefix(_snake_case )} [{filename}]({url})''' ) if __name__ == "__main__": print_directory_md('''.''')
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _snake_case ( _snake_case : Any ) -> int: '''simple docstring''' _A , _A = image.size _A , _A = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 _A = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) _A = np.array(_snake_case ).astype(np.floataa ) / 255.0 _A = image[None].transpose(0 , 3 , 1 , 2 ) _A = torch.from_numpy(_snake_case ) return 2.0 * image - 1.0 class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : VQModel , _UpperCAmelCase : UNetaDModel , _UpperCAmelCase : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): super().__init__() self.register_modules(vqvae=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase ) @torch.no_grad() def __call__( self : str , _UpperCAmelCase : Union[torch.Tensor, PIL.Image.Image] = None , _UpperCAmelCase : Optional[int] = 1 , _UpperCAmelCase : Optional[int] = 100 , _UpperCAmelCase : Optional[float] = 0.0 , _UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCAmelCase : Optional[str] = "pil" , _UpperCAmelCase : bool = True , ): if isinstance(_UpperCAmelCase , PIL.Image.Image ): _A = 1 elif isinstance(_UpperCAmelCase , torch.Tensor ): _A = image.shape[0] else: raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_UpperCAmelCase )}''' ) if isinstance(_UpperCAmelCase , PIL.Image.Image ): _A = preprocess(_UpperCAmelCase ) _A , _A = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image _A = (batch_size, self.unet.config.in_channels // 2, height, width) _A = next(self.unet.parameters() ).dtype _A = randn_tensor(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase ) _A = image.to(device=self.device , dtype=_UpperCAmelCase ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_UpperCAmelCase , device=self.device ) _A = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler _A = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _A = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _A = {} if accepts_eta: _A = eta for t in self.progress_bar(_UpperCAmelCase ): # concat latents and low resolution image in the channel dimension. 
_A = torch.cat([latents, image] , dim=1 ) _A = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _A = self.unet(_UpperCAmelCase , _UpperCAmelCase ).sample # compute the previous noisy sample x_t -> x_t-1 _A = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample # decode the image latents with the VQVAE _A = self.vqvae.decode(_UpperCAmelCase ).sample _A = torch.clamp(_UpperCAmelCase , -1.0 , 1.0 ) _A = image / 2 + 0.5 _A = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _A = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCAmelCase )
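# Usage sketch (added): this pipeline corresponds to diffusers'
# LDMSuperResolutionPipeline; the checkpoint id below is an assumption.
#
#   import PIL.Image
#
#   pipeline = LDMSuperResolutionPipeline.from_pretrained(
#       "CompVis/ldm-super-resolution-4x-openimages"
#   )
#   low_res = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#   upscaled = pipeline(low_res, num_inference_steps=100, eta=1.0).images[0]
#   upscaled.save("upscaled.png")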
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" from collections.abc import Callable import numpy as np def _snake_case ( _snake_case : Callable , _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float ) -> np.array: '''simple docstring''' _A = int(np.ceil((x_end - xa) / step_size ) ) _A = np.zeros((n + 1,) ) _A = ya _A = xa for k in range(_snake_case ): _A = y[k] + step_size * ode_func(_snake_case , y[k] ) _A = y[k] + ( (step_size / 2) * (ode_func(_snake_case , y[k] ) + ode_func(x + step_size , _snake_case )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float , ) -> float: '''simple docstring''' _A = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('All input parameters must be positive' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('Relative densities cannot be greater than one' ) else: _A = 1 - (matter_density + radiation_density + dark_energy) _A = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _A = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation a = 0.3 print( hubble_parameter( hubble_constant=6_8.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = '''megatron-bert''' def __init__( self : Any , _UpperCAmelCase : int=29_056 , _UpperCAmelCase : int=1_024 , _UpperCAmelCase : List[Any]=24 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : str=4_096 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : Tuple=1E-1_2 , _UpperCAmelCase : str=0 , _UpperCAmelCase : str="absolute" , _UpperCAmelCase : Any=True , **_UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = position_embedding_type _A = use_cache
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" from collections import defaultdict def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' _A = 1 _A = True for v in tree[start]: if v not in visited: ret += dfs(_snake_case ) if ret % 2 == 0: cuts.append(_snake_case ) return ret def _snake_case ( ) -> str: '''simple docstring''' dfs(1 ) if __name__ == "__main__": a , a = 10, 9 a = defaultdict(list) a = {} a = [] a = 0 a = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring""" import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Dict = ProphetNetTokenizer UpperCAmelCase : Optional[Any] = False def lowerCAmelCase_ ( self : Optional[int] ): super().setUp() _A = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Any ): _A = 'UNwant\u00E9d,running' _A = 'unwanted, running' return input_text, output_text def lowerCAmelCase_ ( self : int ): _A = self.tokenizer_class(self.vocab_file ) _A = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [9, 6, 7, 12, 10, 11] ) def lowerCAmelCase_ ( self : str ): _A = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase_ ( self : List[str] ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Dict ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase_ ( self : int ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Tuple ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self : Dict ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Dict ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase , strip_accents=_UpperCAmelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self : Dict ): _A = BasicTokenizer(do_lower_case=_UpperCAmelCase , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase_ ( self : str ): _A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] _A = {} for i, token in enumerate(_UpperCAmelCase ): _A = i _A = WordpieceTokenizer(vocab=_UpperCAmelCase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def lowerCAmelCase_ ( self : str ): _A = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) _A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _A = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102] _A = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' ) self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase ) _A = list(batch.input_ids.numpy()[0] ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def lowerCAmelCase_ ( self : Any ): self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase_ ( self : List[Any] ): self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase_ ( self : Any ): self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def lowerCAmelCase_ ( self : Dict ): _A = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) _A = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase ) _A = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
315
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
1
"""simple docstring""" def _snake_case ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ) -> bool: '''simple docstring''' _A = set() # Replace all the whitespace in our sentence _A = input_str.replace(' ' , '' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(_snake_case ) == 26 def _snake_case ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ) -> bool: '''simple docstring''' _A = [False] * 26 for char in input_str: if char.islower(): _A = True elif char.isupper(): _A = True return all(_snake_case ) def _snake_case ( _snake_case : str = "The quick brown fox jumps over the lazy dog" , ) -> bool: '''simple docstring''' return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _snake_case ( ) -> None: '''simple docstring''' from timeit import timeit _A = 'from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest' print(timeit('is_pangram()' , setup=_snake_case ) ) print(timeit('is_pangram_faster()' , setup=_snake_case ) ) print(timeit('is_pangram_fastest()' , setup=_snake_case ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
315
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
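# The learning-rate computation in main() above follows the linear scaling rule
# from the MAE recipe, absolute_lr = base_lr * total_batch_size / 256. A
# standalone sketch of that arithmetic (the function name is illustrative):
def absolute_lr(base_lr: float, per_device_batch_size: int, grad_accum_steps: int, world_size: int) -> float:
    total_batch_size = per_device_batch_size * grad_accum_steps * world_size
    return base_lr * total_batch_size / 256


# e.g. absolute_lr(1e-3, 64, 1, 8) == 2e-3 for a global batch of 512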
315
1
"""simple docstring""" from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration a = HfArgumentParser(InitializationArguments) a = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization a = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks a = { '''vocab_size''': len(tokenizer), '''scale_attn_by_inverse_layer_idx''': True, '''reorder_and_upcast_attn''': True, } # Load model config (GPT-2 large in this case) a = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config a = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
315
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
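# The name-matching helpers above toggle quantizers whose module names match a
# regex via re.search. A minimal sketch of how the --quant-disable-layer-module
# pattern is applied (the module names below are hypothetical BERT-style names,
# used only for illustration):
import re

pattern = r'layer.\d+.' + 'attention.self'  # as assembled from the CLI flag above
for name in (
    'bert.encoder.layer.0.attention.self.query._weight_quantizer',
    'bert.embeddings.word_embeddings._weight_quantizer',
):
    print(name, '->', bool(re.search(pattern, name)))
# only the first name matches, so only its quantizer would be toggled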
315
1
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class lowercase_ : '''simple docstring''' def __init__( self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Any=13 , _UpperCAmelCase : Optional[int]=7 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Any=33 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : str=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : int=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : Any=0.02 , _UpperCAmelCase : List[Any]=3 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Dict=None , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope def lowerCAmelCase_ ( self : int ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Tuple ): return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ): _A = EsmModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) _A = model(_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ): _A = EsmForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ): _A = self.num_labels _A = EsmForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Any ): _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : Optional[Any] = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) UpperCAmelCase : List[Any] = () UpperCAmelCase : Tuple = ( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase : Optional[Any] = True def lowerCAmelCase_ ( self : str ): _A = EsmModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : Tuple ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _A = type self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : int ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : List[str] ): for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = EsmModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.model_tester.prepare_config_and_inputs()[0] _A = EsmEmbeddings(config=_UpperCAmelCase ) _A = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) _A = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) _A = create_position_ids_from_input_ids(_UpperCAmelCase , model.padding_idx ) 
self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(_UpperCAmelCase , _UpperCAmelCase ) ) ) def lowerCAmelCase_ ( self : Optional[int] ): _A = self.model_tester.prepare_config_and_inputs()[0] _A = EsmEmbeddings(config=_UpperCAmelCase ) _A = torch.empty(2 , 4 , 30 ) _A = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] _A = torch.as_tensor([expected_single_positions, expected_single_positions] ) _A = embeddings.create_position_ids_from_inputs_embeds(_UpperCAmelCase ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(_UpperCAmelCase , _UpperCAmelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def lowerCAmelCase_ ( self : Union[str, Any] ): pass @require_torch class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Optional[Any] ): with torch.no_grad(): _A = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _A = torch.tensor([[0, 1, 2, 3, 4, 5]] ) _A = model(_UpperCAmelCase )[0] _A = 33 _A = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , _UpperCAmelCase ) _A = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCAmelCase_ ( self : Any ): with torch.no_grad(): _A = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() _A = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) _A = model(_UpperCAmelCase )[0] # compare the actual values for a slice. _A = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
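# A sketch of the position-id rule the two embedding tests above verify: padding
# positions keep padding_idx and real tokens count up from padding_idx + 1. This
# mirrors what the tests expect; it is not the library implementation itself.
import torch


def position_ids_from_input_ids(input_ids, padding_idx: int):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx


# position_ids_from_input_ids(torch.as_tensor([[12, 31, 13, 1]]), padding_idx=1)
# -> tensor([[2, 3, 4, 1]]), matching the expected positions in the test above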
315
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
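# The metric above is a thin wrapper over scipy, so calling scipy directly
# reproduces the docstring examples (spearmanr is already imported at the top):
rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))     # -0.7
print(round(pvalue, 2))  # 0.19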
315
1
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ) -> List[str]: '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_snake_case ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ) -> Union[str, Any]: '''simple docstring''' with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ) -> int: '''simple docstring''' with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_snake_case ): http_head('https://huggingface.co' )
315
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
315
1
"""simple docstring""" import math def _snake_case ( _snake_case : int ) -> list: '''simple docstring''' _A = [True] * n _A = False _A = False _A = True for i in range(3 , int(n**0.5 + 1 ) , 2 ): _A = i * 2 while index < n: _A = False _A = index + i _A = [2] for i in range(3 , _snake_case , 2 ): if is_prime[i]: primes.append(_snake_case ) return primes def _snake_case ( _snake_case : int = 99_99_66_66_33_33 ) -> int: '''simple docstring''' _A = math.floor(math.sqrt(_snake_case ) ) + 1_00 _A = prime_sieve(_snake_case ) _A = 0 _A = 0 _A = primes[prime_index] while (last_prime**2) <= limit: _A = primes[prime_index + 1] _A = last_prime**2 _A = next_prime**2 # Get numbers divisible by lps(current) _A = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) _A = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps _A = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair _A = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
315
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
1
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument a = { '''/attention/''': '''/0/SelfAttention/''', '''/self_attention/''': '''/0/SelfAttention/''', '''/encoder_decoder_attention/''': '''/1/EncDecAttention/''', '''value''': '''v''', '''query''': '''q''', '''key''': '''k''', '''out''': '''o''', '''pre_self_attention_layer_norm''': '''0/layer_norm''', '''pre_cross_attention_layer_norm''': '''1/layer_norm''', '''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong '''token_embedder''': '''shared''', '''encoder_norm''': '''final_layer_norm''', '''decoder_norm''': '''final_layer_norm''', '''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''', '''router/router_weights/w/''': '''router/classifier/''', '''roer/roer_weights/w/''': '''router/classifier/''', '''logits_dense''': '''lm_head''', } def _snake_case ( _snake_case : Union[str, Any] ) -> List[str]: '''simple docstring''' _A = list(s_dict.keys() ) for key in keys: _A = R'.*/layers_(\d+)' _A = key if re.match(_snake_case , _snake_case ): _A = re.sub(R'layers_(\d+)' , R'block/\1/layer' , _snake_case ) _A = R'(encoder|decoder)\/' if re.match(_snake_case , _snake_case ): _A = re.match(_snake_case , _snake_case ).groups() if groups[0] == "encoder": _A = re.sub(R'/mlp/' , R'/1/mlp/' , _snake_case ) _A = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , _snake_case ) elif groups[0] == "decoder": _A = re.sub(R'/mlp/' , R'/2/mlp/' , _snake_case ) _A = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , _snake_case ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: _A = new_key.replace(_snake_case , _snake_case ) print(F'''{key} -> {new_key}''' ) _A = s_dict.pop(_snake_case ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _A = s_dict[ 'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: _A = s_dict[ 'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: _A = s_dict[key].shape[0] _A = s_dict[key] for idx in range(_snake_case ): _A = expert_weihts[idx] print(F'''{key} -> {key.replace("expert/" , "nested fstring" )}''' ) s_dict.pop(_snake_case ) return s_dict a = { '''NUM_ENCODER_LAYERS''': '''num_layers''', '''NUM_DECODER_LAYERS''': '''num_decoder_layers''', '''NUM_HEADS''': '''num_heads''', '''HEAD_DIM''': '''d_kv''', '''EMBED_DIM''': '''d_model''', '''MLP_DIM''': '''d_ff''', '''NUM_SELECTED_EXPERTS''': '''num_selected_experts''', '''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''', '''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''', '''dense.MlpBlock.activations''': '''feed_forward_proj''', } def _snake_case ( _snake_case : List[Any] , _snake_case : Tuple ) -> Any: '''simple docstring''' import regex as re with open(_snake_case , 'r' ) as f: _A = f.read() _A = re.findall(R'(.*) = ([0-9.]*)' , _snake_case ) _A = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": _A = float(_snake_case ) if '.' in value else int(_snake_case ) _A = re.findall(R'(.*activations) = \(\'(.*)\',\)' , _snake_case )[0] _A = str(activation[1] ) _A = num_experts _A = SwitchTransformersConfig(**_snake_case ) return config def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : List[Any]=None , _snake_case : Optional[Any]="./" , _snake_case : List[str]=8 ) -> int: '''simple docstring''' print(F'''Loading flax weights from : {flax_checkpoint_path}''' ) _A = checkpoints.load_tax_checkpoint(_snake_case ) if gin_file is not None: _A = convert_gin_to_config(_snake_case , _snake_case ) else: _A = SwitchTransformersConfig.from_pretrained(_snake_case ) _A = SwitchTransformersForConditionalGeneration(_snake_case ) _A = flax_params['target'] _A = flatten_dict(_snake_case , sep='/' ) _A = rename_keys(_snake_case ) _A = unflatten_dict(_snake_case , sep='/' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(_snake_case , _snake_case ) print(F'''Save PyTorch model to {pytorch_dump_path}''' ) pt_model.save_pretrained(_snake_case ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--switch_t5x_checkpoint_path''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the''' ''' model architecture. If not provided, a `gin_file` has to be provided.''' ), ) parser.add_argument( '''--gin_file''', default=None, type=str, required=False, help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''', ) parser.add_argument( '''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.''' ) parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''') a = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
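# The renaming above maps T5X parameter paths like 'encoder/layers_0/...' onto
# the transformers block layout. A standalone sketch of the first rewrite rule
# (the key string is a made-up example, not taken from a real checkpoint):
import re

key = 'encoder/layers_0/attention/query/kernel'
key = re.sub(r'layers_(\d+)', r'block/\1/layer', key)
print(key)  # encoder/block/0/layer/attention/query/kernel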
315
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
315
1
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def _snake_case ( _snake_case : dict ) -> tuple: '''simple docstring''' return (data["data"], data["target"]) def _snake_case ( _snake_case : np.ndarray , _snake_case : np.ndarray , _snake_case : np.ndarray ) -> np.ndarray: '''simple docstring''' _A = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(_snake_case , _snake_case ) # Predict target for test data _A = xgb.predict(_snake_case ) _A = predictions.reshape(len(_snake_case ) , 1 ) return predictions def _snake_case ( ) -> None: '''simple docstring''' _A = fetch_california_housing() _A , _A = data_handling(_snake_case ) _A , _A , _A , _A = train_test_split( _snake_case , _snake_case , test_size=0.25 , random_state=1 ) _A = xgboost(_snake_case , _snake_case , _snake_case ) # Error printing print(F'''Mean Absolute Error : {mean_absolute_error(_snake_case , _snake_case )}''' ) print(F'''Mean Square Error : {mean_squared_error(_snake_case , _snake_case )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
315
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = 0 UpperCAmelCase : bool = False UpperCAmelCase : float = 3.0 class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): # If no defaults are changed, `to_kwargs` returns an empty dict. self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} ) self.assertDictEqual(MockClass(a=2 , b=_UpperCAmelCase ).to_kwargs() , {'a': 2, 'b': True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} ) @require_cuda def lowerCAmelCase_ ( self : int ): # If no defaults are changed, `to_kwargs` returns an empty dict. _A = GradScalerKwargs(init_scale=1_024 , growth_factor=2 ) AcceleratorState._reset_state() _A = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) _A = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_000 ) self.assertEqual(scaler._enabled , _UpperCAmelCase ) @require_multi_gpu def lowerCAmelCase_ ( self : Tuple ): _A = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCAmelCase , env=os.environ.copy() ) if __name__ == "__main__": a = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) a = Accelerator(kwargs_handlers=[ddp_scaler]) a = torch.nn.Linear(100, 200) a = accelerator.prepare(model) # Check the values changed in kwargs a = '''''' a = model.bucket_bytes_cap // (1_024 * 1_024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
315
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
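# Worked note (added for illustration): `speed_metrics` divides the number of
# samples and steps by the measured wall-clock time (any recorded JIT
# compilation time is excluded by shifting `start_time` above), producing
# `{prefix}_runtime`, `{prefix}_samples_per_second` and
# `{prefix}_steps_per_second` entries in the metrics dict.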
315
1
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
315
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def _snake_case ( _snake_case : Dict=None ) -> int: '''simple docstring''' if subparsers is not None: _A = subparsers.add_parser('env' ) else: _A = argparse.ArgumentParser('Accelerate env command' ) parser.add_argument( '--config_file' , default=_snake_case , help='The config file to use for the default values in the launching script.' ) if subparsers is not None: parser.set_defaults(func=_snake_case ) return parser def _snake_case ( _snake_case : Union[str, Any] ) -> str: '''simple docstring''' _A = torch.__version__ _A = torch.cuda.is_available() _A = is_xpu_available() _A = is_npu_available() _A = 'Not found' # Get the default from the config file. if args.config_file is not None or os.path.isfile(_snake_case ): _A = load_config_from_file(args.config_file ).to_dict() _A = { '`Accelerate` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Numpy version': np.__version__, 'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''', 'PyTorch XPU available': str(_snake_case ), 'PyTorch NPU available': str(_snake_case ), 'System RAM': F'''{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB''', } if pt_cuda_available: _A = torch.cuda.get_device_name() print('\nCopy-and-paste the text below in your GitHub issue\n' ) print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' ) _A = ( '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(_snake_case , _snake_case ) else F'''\t{accelerate_config}''' ) print(_snake_case ) _A = accelerate_config return info def _snake_case ( ) -> int: '''simple docstring''' _A = env_command_parser() _A = parser.parse_args() env_command(_snake_case ) return 0 if __name__ == "__main__": raise SystemExit(main())
315
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
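# Worked check (added for illustration): for the matrix used throughout the
# tests above, A = [[1, 2, 3], [2, 4, 5], [6, 7, 8]], expanding along the first
# row gives
#
#   det(A) = 1 * (4 * 8 - 5 * 7) - 2 * (2 * 8 - 5 * 6) + 3 * (2 * 7 - 4 * 6)
#          = -3 + 28 - 30
#          = -5,
#
# which matches the expected value asserted in the determinant test.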
315
1
"""simple docstring""" import collections import os import re from pathlib import Path a = '''src/transformers''' # Matches is_xxx_available() a = re.compile(r'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} a = re.compile(r'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] a = re.compile(r'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available a = re.compile(r'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") a = re.compile(r'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] a = re.compile(r'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", a = re.compile(r'''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], a = re.compile(r'''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo a = re.compile(r'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: a = re.compile(r'''^\s*try:''') # Catches a line with else: a = re.compile(r'''^\s*else:''') def _snake_case ( _snake_case : Optional[int] ) -> Optional[int]: '''simple docstring''' if _re_test_backend.search(_snake_case ) is None: return None _A = [b[0] for b in _re_backend.findall(_snake_case )] backends.sort() return "_and_".join(_snake_case ) def _snake_case ( _snake_case : str ) -> Tuple: '''simple docstring''' with open(_snake_case , 'r' , encoding='utf-8' , newline='\n' ) as f: _A = f.readlines() _A = 0 while line_index < len(_snake_case ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_snake_case ): return None # First grab the objects without a specific backend in _import_structure _A = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: _A = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_snake_case ): _A = _re_one_line_import_struct.search(_snake_case ).groups()[0] _A = re.findall(R'\[([^\]]+)\]' , _snake_case ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue _A = _re_import_struct_key_value.search(_snake_case ) if single_line_import_search is not None: _A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_snake_case ) > 0] objects.extend(_snake_case ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 _A = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): _A = lines[line_index] if _re_import_struct_add_one.search(_snake_case ) is not None: objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] ) elif _re_import_struct_add_many.search(_snake_case ) is not None: _A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(', ' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_between_brackets.search(_snake_case ) is not None: _A = _re_between_brackets.search(_snake_case ).groups()[0].split(', ' ) _A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0] objects.extend(_snake_case ) elif _re_quote_object.search(_snake_case ) is not None: objects.append(_re_quote_object.search(_snake_case ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 _A = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _A = [] while ( line_index < len(_snake_case ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 _A = {'none': objects} # Let's continue with backend-specific objects while line_index < len(_snake_case ): # If the line is an if is_backend_available, we grab all objects associated. 
_A = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _A = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _A = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): _A = lines[line_index] _A = _re_import.search(_snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 _A = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _snake_case ( _snake_case : List[Any] , _snake_case : str ) -> int: '''simple docstring''' def find_duplicates(_snake_case : List[str] ): return [k for k, v in collections.Counter(_snake_case ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _A = [] for key in import_dict_objects.keys(): _A = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _A = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _A = 'base imports' if key == 'none' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def _snake_case ( ) -> Optional[int]: '''simple docstring''' _A = [] for root, _, files in os.walk(_snake_case ): if "__init__.py" in files: _A = os.path.join(_snake_case , '__init__.py' ) _A = parse_init(_snake_case ) if objects is not None: _A = analyze_results(*_snake_case ) if len(_snake_case ) > 0: _A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('\n'.join(_snake_case ) ) if len(_snake_case ) > 0: raise ValueError('\n\n'.join(_snake_case ) ) def _snake_case ( ) -> str: '''simple docstring''' _A = [] for path, directories, files in os.walk(_snake_case ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(_snake_case ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_snake_case ) / folder).glob('*.py' ) ) ) == 0: continue _A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) ) _A = short_path.replace(os.path.sep , '.' ) submodules.append(_snake_case ) for fname in files: if fname == "__init__.py": continue _A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) ) _A = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' 
) ) == 1:
                submodules.append(_snake_case )
    return submodules


a = [
    '''convert_pytorch_checkpoint_to_tf2''',
    '''modeling_flax_pytorch_utils''',
    '''models.esm.openfold_utils''',
]


def _snake_case ( ) -> str:
    '''simple docstring'''

    from transformers.utils import direct_transformers_import

    _A = direct_transformers_import(_snake_case )
    _A = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-) add them.
    with open(os.path.join(_snake_case , '__init__.py' ) , 'r' ) as f:
        _A = f.read()
    import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , _snake_case ) ) )
    _A = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(_snake_case ) > 0:
        _A = '\n'.join(F'''- {module}''' for module in module_not_registered )
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F'''{list_of_modules}\n'''
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
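# Worked example (added for illustration): the nested `find_duplicates` helper
# defined above counts occurrences per object name and keeps any name seen more
# than once, e.g.
#
#   find_duplicates(['BertModel', 'BertModel', 'BertConfig'])  # -> ['BertModel']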
315
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
315
1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=__lowerCAmelCase ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization UpperCAmelCase : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) UpperCAmelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} ) UpperCAmelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} ) UpperCAmelCase : str = "text" UpperCAmelCase : str = "labels" def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Optional[Any] ): if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , _UpperCAmelCase ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) _A = copy.deepcopy(self ) _A = self.label_schema.copy() _A = features[self.label_column] _A = label_schema return task_template @property def lowerCAmelCase_ ( self : Any ): return { self.text_column: "text", self.label_column: "labels", }
315
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
)
    parser.add_argument(
        '--fp16' ,
        action='store_true' ,
        help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' ,
        type=_snake_case ,
        default='O1' ,
        help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' )
    parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' )
    parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' )
    parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' )
    parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' )
    _A = parser.parse_args()
    sanity_checks(_snake_case )

    # ARGS #
    init_gpu_params(_snake_case )
    set_seed(_snake_case )
    if args.is_master:
        if os.path.exists(args.dump_path ):
            if not args.force:
                raise ValueError(
                    F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
                    ' it. Use `--force` if you want to overwrite it.' )
            else:
                shutil.rmtree(args.dump_path )

        if not os.path.exists(args.dump_path ):
            os.makedirs(args.dump_path )
        logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )

        # SAVE PARAMS #
        logger.info(F'''Param: {args}''' )
        with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f:
            json.dump(vars(_snake_case ) , _snake_case , indent=4 )
        git_log(args.dump_path )

    _A , _A , _A = MODEL_CLASSES[args.student_type]
    _A , _A , _A = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    _A = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    _A = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        _A = tokenizer.all_special_tokens.index(_snake_case )
        _A = tokenizer.all_special_ids[idx]
    logger.info(F'''Special tokens {special_tok_ids}''' )
    _A = special_tok_ids
    _A = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(F'''Loading data from {args.data_file}''' )
    with open(args.data_file , 'rb' ) as fp:
        _A = pickle.load(_snake_case )

    if args.mlm:
        logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts , 'rb' ) as fp:
            _A = pickle.load(_snake_case )

        _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            _A = 0.0  # do not predict special tokens
        _A = torch.from_numpy(_snake_case )
    else:
        _A = None

    _A = LmSeqsDataset(params=_snake_case , data=_snake_case )
    logger.info('Data loader created.' )

    # STUDENT #
    logger.info(F'''Loading student config from {args.student_config}''' )
    _A = student_config_class.from_pretrained(args.student_config )
    _A = True
    if args.student_pretrained_weights is not None:
        logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case )
    else:
        _A = student_model_class(_snake_case )

    if args.n_gpu > 0:
        student.to(F'''cuda:{args.local_rank}''' )
    logger.info('Student loaded.'
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
315
1
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor a = random.Random() def _snake_case ( _snake_case : Optional[int] , _snake_case : Union[str, Any]=1.0 , _snake_case : Any=None , _snake_case : Optional[Any]=None ) -> Optional[Any]: '''simple docstring''' if rng is None: _A = global_rng _A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=7 , _UpperCAmelCase : str=400 , _UpperCAmelCase : Optional[Any]=2_000 , _UpperCAmelCase : int=24 , _UpperCAmelCase : List[str]=24 , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : str=16_000 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Dict=True , ): _A = parent _A = batch_size _A = min_seq_length _A = max_seq_length _A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _A = feature_size _A = num_mel_bins _A = padding_value _A = sampling_rate _A = return_attention_mask _A = do_normalize def lowerCAmelCase_ ( self : Any ): return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Dict=False , _UpperCAmelCase : str=False ): def _flatten(_UpperCAmelCase : List[Any] ): return list(itertools.chain(*_UpperCAmelCase ) ) if equal_length: _A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _A = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _A = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : int = SpeechaTextFeatureExtractor if is_speech_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _A = SpeechaTextFeatureExtractionTester(self ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Any ): self.assertTrue(np.all(np.mean(_UpperCAmelCase , axis=0 ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase , axis=0 ) - 1 ) < 1E-3 ) ) def lowerCAmelCase_ ( self : int ): # Tests that all call wrap to encode_plus and batch_encode_plus _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _A = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test feature size _A = feature_extractor(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size ) # Test not batched input _A = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features _A 
= feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test batched _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. _A = [floats_list((1, x) )[0] for x in (800, 800, 800)] _A = np.asarray(_UpperCAmelCase ) _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _A = ['longest', 'max_length', 'do_not_pad'] _A = [None, 16, None] for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ): _A = feature_extractor( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase ) _A = inputs.input_features _A = inputs.attention_mask _A = [np.sum(_UpperCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCAmelCase_ ( self : Any ): _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _A = ['longest', 'max_length', 'do_not_pad'] _A = [None, 16, None] for max_length, padding in zip(_UpperCAmelCase , _UpperCAmelCase ): _A = feature_extractor( _UpperCAmelCase , max_length=_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='np' , return_attention_mask=_UpperCAmelCase ) _A = inputs.input_features _A = inputs.attention_mask _A = [np.sum(_UpperCAmelCase ) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] ) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] ) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1E-6 ) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] ) def lowerCAmelCase_ ( self : int ): _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _A = feature_extractor( _UpperCAmelCase , padding='max_length' , max_length=4 , truncation=_UpperCAmelCase , return_tensors='np' , return_attention_mask=_UpperCAmelCase , ) _A = inputs.input_features _A = inputs.attention_mask _A = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1] ) self._check_zero_mean_unit_variance(input_features[2] ) def lowerCAmelCase_ ( self : Dict ): _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = [floats_list((1, x) )[0] for x in range(800 
, 1_400 , 200 )] _A = feature_extractor( _UpperCAmelCase , padding='longest' , max_length=4 , truncation=_UpperCAmelCase , return_tensors='np' , return_attention_mask=_UpperCAmelCase , ) _A = inputs.input_features _A = inputs.attention_mask _A = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 24) ) _A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _A = feature_extractor( _UpperCAmelCase , padding='longest' , max_length=16 , truncation=_UpperCAmelCase , return_tensors='np' , return_attention_mask=_UpperCAmelCase , ) _A = inputs.input_features _A = inputs.attention_mask _A = np.sum(attention_mask == 1 , axis=1 ) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] ) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] ) self._check_zero_mean_unit_variance(input_features[2] ) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 24) ) def lowerCAmelCase_ ( self : List[str] ): import torch _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = np.random.rand(100 , 32 ).astype(np.floataa ) _A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _A = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _A = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Dict ): from datasets import load_dataset _A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _A = ds.sort('id' ).select(range(_UpperCAmelCase ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowerCAmelCase_ ( self : Dict ): # fmt: off _A = np.array([ -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241, -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128, -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625, ] ) # fmt: on _A = self._load_datasamples(1 ) _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = feature_extractor(_UpperCAmelCase , return_tensors='pt' ).input_features self.assertEquals(input_features.shape , (1, 584, 24) ) self.assertTrue(np.allclose(input_features[0, 0, :30] , _UpperCAmelCase , atol=1E-4 ) )
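# Minimal sketch (added for illustration; assumes NumPy only) of the
# per-utterance normalization that `_check_zero_mean_unit_variance` asserts
# above: for a (time, feature) array, subtract the mean and divide by the
# standard deviation along the time axis.
import numpy as np


def _zero_mean_unit_var_sketch(features: np.ndarray) -> np.ndarray:
    # The small epsilon guards against division by zero for constant features.
    return (features - features.mean(axis=0)) / np.sqrt(features.var(axis=0) + 1e-7)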
315
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
315
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = '''deit''' def __init__( self : Optional[int] , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[int]=3_072 , _UpperCAmelCase : List[Any]="gelu" , _UpperCAmelCase : Tuple=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Dict=1E-1_2 , _UpperCAmelCase : int=224 , _UpperCAmelCase : List[Any]=16 , _UpperCAmelCase : str=3 , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=16 , **_UpperCAmelCase : Tuple , ): super().__init__(**_UpperCAmelCase ) _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = layer_norm_eps _A = image_size _A = patch_size _A = num_channels _A = qkv_bias _A = encoder_stride class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = version.parse('''1.11''' ) @property def lowerCAmelCase_ ( self : Dict ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCAmelCase_ ( self : List[Any] ): return 1E-4
315
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device a = False class lowercase_ ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : Union[str, Any] ): _A = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' ) # remove text_unet pipe.remove_unused_weights() pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = 'A painting of a squirrel eating a burger ' _A = torch.manual_seed(0 ) _A = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_UpperCAmelCase ) _A = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = generator.manual_seed(0 ) _A = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def lowerCAmelCase_ ( self : Tuple ): _A = VersatileDiffusionTextToImagePipeline.from_pretrained( 'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = 'A painting of a squirrel eating a burger ' _A = torch.manual_seed(0 ) _A = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images _A = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _A = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
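# A short usage sketch (not from the original file). It assumes the extractor
# is reachable through the public transformers API; the printed shapes are
# approximate expectations, not values stated by the source.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # 1 s of fake 16 kHz audio
inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
print(inputs["input_values"].shape)   # (1, 16000): raw waveform features
targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(targets["input_values"].shape)  # (1, num_frames, 80): log-mel targets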
"""simple docstring""" import torch from transformers import AutoModel class lowercase_ ( torch.nn.Module ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : Optional[Any]="sayef/fsner-bert-base-uncased" ): super(_UpperCAmelCase , self ).__init__() _A = AutoModel.from_pretrained(_UpperCAmelCase , return_dict=_UpperCAmelCase ) _A = torch.nn.CosineSimilarity(3 , 1E-0_8 ) _A = torch.nn.Softmax(dim=1 ) def lowerCAmelCase_ ( self : Dict , **_UpperCAmelCase : int ): return self.bert(**_UpperCAmelCase ).last_hidden_state def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[str] ): return token_embeddings.sum(2 , keepdim=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]=1 ): return self.softmax(T * self.cos(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ): _A = W_supports['sizes'].tolist() _A = W_supports['start_token_id'].item() _A = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _A = self.BERT(**_UpperCAmelCase ) _A = self.BERT(**_UpperCAmelCase ) _A = None _A = None _A = W_supports['input_ids'] == start_token_id _A = W_supports['input_ids'] == end_token_id for i, size in enumerate(_UpperCAmelCase ): if i == 0: _A = 0 else: _A = support_sizes[i - 1] _A = S[s : s + size][start_token_masks[s : s + size]] _A = S[s : s + size][end_token_masks[s : s + size]] _A = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) _A = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: _A = torch.vstack((p_starts, p_start) ) _A = torch.vstack((p_ends, p_end) ) else: _A = p_start _A = p_end return p_starts, p_ends
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
"""simple docstring""" import requests from bsa import BeautifulSoup def _snake_case ( _snake_case : str = "https://www.worldometers.info/coronavirus" ) -> dict: '''simple docstring''' _A = BeautifulSoup(requests.get(_snake_case ).text , 'html.parser' ) _A = soup.findAll('h1' ) _A = soup.findAll('div' , {'class': 'maincounter-number'} ) keys += soup.findAll('span' , {'class': 'panel-title'} ) values += soup.findAll('div' , {'class': 'number-table-main'} ) return {key.text.strip(): value.text.strip() for key, value in zip(_snake_case , _snake_case )} if __name__ == "__main__": print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''') for key, value in world_covidaa_stats().items(): print(F'''{key}\n{value}\n''')
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
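# A minimal sketch (not in the original file) of initializing and applying one
# of the blocks defined above with dummy data; the shapes, the time-embedding
# width, and the expected stride-2 output are assumptions, not facts the
# source states.
import jax
import jax.numpy as jnp

block = FlaxDownBlock2D(in_channels=32, out_channels=32, num_layers=1)
sample = jnp.zeros((1, 16, 16, 32))  # NHWC, matching the channel-last concat above
temb = jnp.zeros((1, 128))           # time embedding
params = block.init(jax.random.PRNGKey(0), sample, temb)
hidden, skips = block.apply(params, sample, temb)
print(hidden.shape, len(skips))      # expected: (1, 8, 8, 32) and 2 skip states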
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = ShapEImgaImgPipeline UpperCAmelCase : Dict = ['''image'''] UpperCAmelCase : List[Any] = ['''image'''] UpperCAmelCase : Optional[Any] = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] UpperCAmelCase : Union[str, Any] = False @property def lowerCAmelCase_ ( self : List[str] ): return 32 @property def lowerCAmelCase_ ( self : str ): return 32 @property def lowerCAmelCase_ ( self : Tuple ): return self.time_input_dim * 4 @property def lowerCAmelCase_ ( self : Optional[int] ): return 8 @property def lowerCAmelCase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) _A = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) _A = CLIPVisionModel(_UpperCAmelCase ) return model @property def lowerCAmelCase_ ( self : Optional[int] ): _A = CLIPImageProcessor( crop_size=224 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , ) return image_processor @property def lowerCAmelCase_ ( self : Union[str, Any] ): torch.manual_seed(0 ) _A = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } _A = PriorTransformer(**_UpperCAmelCase ) return model @property def lowerCAmelCase_ ( self : str ): torch.manual_seed(0 ) _A = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } _A = ShapERenderer(**_UpperCAmelCase ) return model def lowerCAmelCase_ ( self : List[str] ): _A = self.dummy_prior _A = self.dummy_image_encoder _A = self.dummy_image_processor _A = self.dummy_renderer _A = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , ) _A = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=0 ): _A = 
floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) if str(_UpperCAmelCase ).startswith('mps' ): _A = torch.manual_seed(_UpperCAmelCase ) else: _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) _A = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def lowerCAmelCase_ ( self : Optional[int] ): _A = 'cpu' _A = self.get_dummy_components() _A = self.pipeline_class(**_UpperCAmelCase ) _A = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) ) _A = output.images[0] _A = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _A = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self : Any ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowerCAmelCase_ ( self : Tuple ): _A = torch_device == 'cpu' _A = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , ) def lowerCAmelCase_ ( self : int ): _A = self.get_dummy_components() _A = self.pipeline_class(**_UpperCAmelCase ) _A = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = 1 _A = 2 _A = self.get_dummy_inputs(_UpperCAmelCase ) for key in inputs.keys(): if key in self.batch_params: _A = batch_size * [inputs[key]] _A = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[str] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : str ): _A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) _A = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) _A = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) _A = pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) _A = pipe( _UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = 1 @register_to_config def __init__( self : str , _UpperCAmelCase : int = 1_000 , _UpperCAmelCase : Optional[Union[np.ndarray, List[float]]] = None ): # set `betas`, `alphas`, `timesteps` self.set_timesteps(_UpperCAmelCase ) # standard deviation of the initial noise distribution _A = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. _A = 4 # running values _A = [] def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, torch.device] = None ): _A = num_inference_steps _A = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] _A = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: _A = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: _A = torch.sin(steps * math.pi / 2 ) ** 2 _A = (1.0 - self.betas**2) ** 0.5 _A = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] _A = timesteps.to(_UpperCAmelCase ) _A = [] def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : int , _UpperCAmelCase : torch.FloatTensor , _UpperCAmelCase : bool = True , ): if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) _A = (self.timesteps == timestep).nonzero().item() _A = timestep_index + 1 _A = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(_UpperCAmelCase ) if len(self.ets ) == 1: _A = self.ets[-1] elif len(self.ets ) == 2: _A = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: _A = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: _A = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) _A = self._get_prev_sample(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : torch.FloatTensor , *_UpperCAmelCase : Dict , **_UpperCAmelCase : Dict ): return sample def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int ): _A = self.alphas[timestep_index] _A = self.betas[timestep_index] _A = self.alphas[prev_timestep_index] _A = self.betas[prev_timestep_index] _A = (sample - sigma * ets) / max(_UpperCAmelCase , 1E-8 ) _A = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self : List[Any] ): return self.config.num_train_timesteps
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowercase_ : '''simple docstring''' def __init__( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : Dict=30 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Optional[Any]=3 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : str=5 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : Optional[Any]=37 , _UpperCAmelCase : str="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Optional[Any]=10 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : str=3 , _UpperCAmelCase : int=0.6 , _UpperCAmelCase : Optional[int]=None , ): _A = parent _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range _A = mask_ratio _A = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self : Any ): return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] ): _A = ViTMAEModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : int ): _A = ViTMAEForPreTraining(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase ) _A = (self.image_size // 
self.patch_size) ** 2 _A = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images _A = 1 _A = ViTMAEForPreTraining(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(_UpperCAmelCase ) _A = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def lowerCAmelCase_ ( self : int ): _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : str = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () UpperCAmelCase : int = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {} UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : List[str] = False UpperCAmelCase : Optional[int] = False UpperCAmelCase : Optional[int] = False def lowerCAmelCase_ ( self : Optional[int] ): _A = ViTMAEModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : int ): self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def lowerCAmelCase_ ( self : Optional[int] ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def lowerCAmelCase_ ( self : Tuple ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) _A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ): # make masks reproducible np.random.seed(2 ) _A = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 ) _A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) _A = torch.from_numpy(_UpperCAmelCase ) # Add `noise` argument. 
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument _A = pt_noise super().check_pt_tf_models(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _A = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _A = outputs[0].cpu().numpy() _A = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase ) _A = model_class.from_pretrained(_UpperCAmelCase ) model.to(_UpperCAmelCase ) # make random mask reproducible torch.manual_seed(2 ) with torch.no_grad(): _A = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) # Make sure we don't have nans _A = after_outputs[0].cpu().numpy() _A = 0 _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_UpperCAmelCase , 1E-5 ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowerCAmelCase_ ( self : Optional[int] ): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowerCAmelCase_ ( self : Tuple ): pass @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def lowerCAmelCase_ ( self : Optional[Any] ): pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def lowerCAmelCase_ ( self : List[Any] ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowerCAmelCase_ ( self : Union[str, Any] ): pass @slow def lowerCAmelCase_ ( self : Any ): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = ViTMAEModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def _snake_case ( ) -> Optional[int]: '''simple docstring''' _A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : List[Any] ): return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self : Dict ): # make random mask reproducible across the PT and TF model np.random.seed(2 ) _A = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ).to(_UpperCAmelCase ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) _A = ViTMAEConfig() _A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) _A = np.random.uniform(size=(1, num_patches) ) # forward pass with torch.no_grad(): _A = model(**_UpperCAmelCase , noise=torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase ) ) # verify the logits _A = torch.Size((1, 196, 768) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _A = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_UpperCAmelCase ) , atol=1E-4 ) )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''', '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''', '''uclanlp/visualbert-vqa-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''', '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''', '''uclanlp/visualbert-vcr-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json''' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = '''visual_bert''' def __init__( self : Union[str, Any] , _UpperCAmelCase : Optional[Any]=30_522 , _UpperCAmelCase : List[Any]=768 , _UpperCAmelCase : Dict=512 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Optional[int]=12 , _UpperCAmelCase : Union[str, Any]=3_072 , _UpperCAmelCase : Optional[Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Union[str, Any]=512 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Union[str, Any]=1E-1_2 , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : str=0 , _UpperCAmelCase : str=2 , **_UpperCAmelCase : Any , ): super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _A = vocab_size _A = max_position_embeddings _A = hidden_size _A = visual_embedding_dim _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = type_vocab_size _A = layer_norm_eps _A = bypass_transformer _A = special_visual_initialize
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
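# Worked example of the absolute-learning-rate rule used in main() above:
#     absolute_lr = base_learning_rate * total_train_batch_size / 256
# With the CustomTrainingArguments default base_learning_rate = 1e-3 and an
# illustrative total batch of 8 (per device) * 4 (grad accumulation) * 2
# (processes) = 64:
#     absolute_lr = 1e-3 * 64 / 256 = 2.5e-4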
"""
Backtracking solver for the open knight's tour problem: number the squares of
an n x n board so that consecutive numbers are a knight's move apart.
"""
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
    """Helper function to solve the knight's tour problem by backtracking."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution to the knight's tour problem for a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
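# Usage sketch for the solver above: a 5x5 board admits an open knight's
# tour, so this prints a board whose cells hold the move numbers 1..25.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)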
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
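# Minimal sketch of the calibrate-then-quantize loop that the enable/finish
# calibration helpers above automate. `model` and `data_loader` are assumed
# to exist; the TensorQuantizer method names are taken from those helpers.
def calibrate(model, data_loader, num_batches=4):
    for name, module in model.named_modules():
        if name.endswith("_quantizer") and module._calibrator is not None:
            module.disable_quant()  # collect statistics instead of quantizing
            module.enable_calib()
    with torch.no_grad():
        for i, batch in enumerate(data_loader):
            if i >= num_batches:
                break
            model(**batch)  # forward passes feed the calibrators
    for name, module in model.named_modules():
        if name.endswith("_quantizer") and module._calibrator is not None:
            module.load_calib_amax()  # freeze the collected amax values
            module.enable_quant()
            module.disable_calib()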
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _snake_case ( ) -> Any: '''simple docstring''' _A = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' _A = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ).convert('RGB' ) return image def _snake_case ( _snake_case : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _A = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def _snake_case ( 
_snake_case : Dict , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _A = dct.pop(_snake_case ) _A = val def _snake_case ( _snake_case : Any , _snake_case : Any ) -> int: '''simple docstring''' for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases _A = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) _A = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict _A = torch.cat((q_bias, torch.zeros_like(_snake_case , requires_grad=_snake_case ), v_bias) ) _A = qkv_bias def _snake_case ( _snake_case : Any ) -> Dict: '''simple docstring''' _A = 3_64 if 'coco' in model_name else 2_24 _A = InstructBlipVisionConfig(image_size=_snake_case ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: _A = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: _A = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: _A = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_20_01 ).to_dict() elif "vicuna-13b" in model_name: _A = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_20_01 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 _A = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict() _A = InstructBlipConfig(vision_config=_snake_case , text_config=_snake_case , qformer_config=_snake_case ) return config, image_size @torch.no_grad() def _snake_case ( _snake_case : List[str] , _snake_case : Optional[Any]=None , _snake_case : Optional[Any]=False ) -> str: '''simple docstring''' _A = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: _A = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) _A = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) _A , _A = get_blipa_config(_snake_case ) _A = InstructBlipForConditionalGeneration(_snake_case ).eval() _A = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } _A , _A = model_name_to_original[model_name] # load original model print('Loading original model...' 
) _A = 'cuda:1' if torch.cuda.is_available() else 'cpu' _A = 'cuda:2' if torch.cuda.is_available() else 'cpu' _A , _A , _A = load_model_and_preprocess( name=_snake_case , model_type=_snake_case , is_eval=_snake_case , device=_snake_case ) original_model.eval() print('Done!' ) # update state dict keys _A = original_model.state_dict() _A = create_rename_keys(_snake_case ) for src, dest in rename_keys: rename_key(_snake_case , _snake_case , _snake_case ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): _A = state_dict.pop(_snake_case ) if key.startswith('Qformer.bert' ): _A = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: _A = key.replace('self' , 'attention' ) if "llm_proj" in key: _A = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: _A = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): _A = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): _A = key.replace('t5' , 'language' ) _A = val # read in qv biases read_in_q_v_bias(_snake_case , _snake_case ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(_snake_case , strict=_snake_case ) _A = load_demo_image() _A = 'What is unusual about this image?' # create processor _A = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=_snake_case , image_std=_snake_case ) _A = InstructBlipProcessor( image_processor=_snake_case , tokenizer=_snake_case , qformer_tokenizer=_snake_case , ) _A = processor(images=_snake_case , text=_snake_case , return_tensors='pt' ).to(_snake_case ) # make sure processor creates exact same pixel values _A = vis_processors['eval'](_snake_case ).unsqueeze(0 ).to(_snake_case ) _A = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , _snake_case ) original_model.to(_snake_case ) hf_model.to(_snake_case ) with torch.no_grad(): if "vicuna" in model_name: _A = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits _A = hf_model(**_snake_case ).logits else: _A = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits _A = tokenizer('\n' , return_tensors='pt' ).input_ids.to(_snake_case ) _A = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 ) _A = hf_model(**_snake_case , labels=_snake_case ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape _A = 1E-4 if 'vicuna' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , _snake_case , atol=_snake_case ) print('Looks ok!' ) print('Generating with original model...' ) _A = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) _A = hf_model.generate( **_snake_case , do_sample=_snake_case , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
_A = 2 print('Original generation:' , _snake_case ) _A = processor.batch_decode(_snake_case , skip_special_tokens=_snake_case ) _A = [text.strip() for text in output_text] print('HF generation:' , _snake_case ) if pytorch_dump_folder_path is not None: processor.save_pretrained(_snake_case ) hf_model.save_pretrained(_snake_case ) if push_to_hub: processor.push_to_hub(F'''Salesforce/{model_name}''' ) hf_model.push_to_hub(F'''Salesforce/{model_name}''' ) if __name__ == "__main__": a = argparse.ArgumentParser() a = [ '''instructblip-vicuna-7b''', '''instructblip-vicuna-13b''', '''instructblip-flan-t5-xl''', '''instructblip-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) a = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
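# Example invocation (a sketch; the script file name is assumed and the dump
# path is a placeholder -- only the flags themselves are defined above):
#
#   python convert_instructblip_original_to_pytorch.py \
#       --model_name instructblip-flan-t5-xl \
#       --pytorch_dump_folder_path ./instructblip-flan-t5-xl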
"""Spearman rank-order correlation coefficient metric."""
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.

Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.

The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
'''

_CITATION = r'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
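# Quick sanity check of the scipy call this metric wraps, mirroring
# Example 1 in the docstring above:
if __name__ == "__main__":
    rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print(round(rho, 2))  # -> -0.7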
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def _snake_case ( _snake_case : str ) -> Tuple: '''simple docstring''' _A = int(_snake_case ) _A , _A , _A = t // 36_00, (t // 60) % 60, t % 60 return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}''' def _snake_case ( _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Tuple , _snake_case : int , _snake_case : Optional[int]=3_00 ) -> Tuple: '''simple docstring''' return F''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def _snake_case ( _snake_case : str ) -> Optional[int]: '''simple docstring''' _A = '<table border="1" class="dataframe">\n' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: _A = F'''{elt:.6f}''' if isinstance(_snake_case , _snake_case ) else str(_snake_case ) html_code += F''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class lowercase_ : '''simple docstring''' UpperCAmelCase : Dict = 5 UpperCAmelCase : int = 0.2 def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional["NotebookTrainingTracker"] = None , _UpperCAmelCase : int = 300 , ): _A = total _A = '' if prefix is None else prefix _A = leave _A = parent _A = width _A = None _A = None _A = None def lowerCAmelCase_ ( self : int , _UpperCAmelCase : int , _UpperCAmelCase : bool = False , _UpperCAmelCase : str = None ): _A = value if comment is not None: _A = comment if self.last_value is None: _A = _A = time.time() _A = _A = value _A = _A = None _A = self.warmup _A = 1 self.update_bar(_UpperCAmelCase ) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ): if self.first_calls > 0: self.first_calls -= 1 _A = time.time() _A = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: _A = self.elapsed_time / (value - self.start_value) else: _A = None if value >= self.total: _A = self.total _A = None if not self.leave: self.close() elif self.average_time_per_item is not None: _A = self.average_time_per_item * (self.total - value) self.update_bar(_UpperCAmelCase ) _A = value _A = current_time if self.average_time_per_item is None: _A = 1 else: _A = max(int(self.update_every / self.average_time_per_item ) , 1 ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict=None ): _A = ' ' * (len(str(self.total ) ) - len(str(_UpperCAmelCase ) )) + str(_UpperCAmelCase ) if self.elapsed_time is None: _A = F'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: _A = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}''' else: _A = ( F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <''' F''' {format_time(self.predicted_remaining )}''' ) self.label += F''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]''' self.display() def lowerCAmelCase_ ( self : List[str] ): _A = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: _A = disp.display(disp.HTML(self.html_code ) , display_id=_UpperCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def lowerCAmelCase_ ( self : List[Any] ): if self.parent is None and self.output is not None: self.output.update(disp.HTML('' ) ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any]=None ): super().__init__(_UpperCAmelCase ) _A = None if column_names is None else [column_names] _A = None def lowerCAmelCase_ ( self : List[str] ): _A = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width ) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table ) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: _A = disp.display(disp.HTML(self.html_code ) , display_id=_UpperCAmelCase ) else: self.output.update(disp.HTML(self.html_code ) ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str ): if self.inner_table is None: _A = [list(values.keys() ), list(values.values() )] else: _A = self.inner_table[0] if len(self.inner_table ) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(_UpperCAmelCase ) _A = columns self.inner_table.append([values[c] for c in columns] ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[int]=300 ): _A = NotebookProgressBar(_UpperCAmelCase , prefix=_UpperCAmelCase , parent=self , width=_UpperCAmelCase ) return self.child_bar def lowerCAmelCase_ ( self : Optional[int] ): _A = None self.display() class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[int] ): _A = None _A = None _A = False def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple , **_UpperCAmelCase : Union[str, Any] ): _A = 'Epoch' if args.evaluation_strategy == IntervalStrategy.EPOCH else 
'Step' _A = 0 _A = 0 _A = [self.first_column] + ['Training Loss'] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('Validation Loss' ) _A = NotebookTrainingTracker(state.max_steps , _UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : Any ): _A = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) _A = False def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Optional[Any] ): if not has_length(_UpperCAmelCase ): return if self.prediction_bar is None: if self.training_tracker is not None: _A = self.training_tracker.add_child(len(_UpperCAmelCase ) ) else: _A = NotebookProgressBar(len(_UpperCAmelCase ) ) self.prediction_bar.update(1 ) else: self.prediction_bar.update(self.prediction_bar.value + 1 ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , **_UpperCAmelCase : Any ): if self.prediction_bar is not None: self.prediction_bar.close() _A = None def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str=None , **_UpperCAmelCase : Tuple ): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: _A = {'Training Loss': logs['loss']} # First column is necessarily Step sine we're not in epoch eval strategy _A = state.global_step self.training_tracker.write_line(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any]=None , **_UpperCAmelCase : Union[str, Any] ): if self.training_tracker is not None: _A = {'Training Loss': 'No log', 'Validation Loss': 'No log'} for log in reversed(state.log_history ): if "loss" in log: _A = log['loss'] break if self.first_column == "Epoch": _A = int(state.epoch ) else: _A = state.global_step _A = 'eval' for k in metrics: if k.endswith('_loss' ): _A = re.sub(r'\_loss$' , '' , _UpperCAmelCase ) _A = metrics.pop('total_flos' , _UpperCAmelCase ) _A = metrics.pop('epoch' , _UpperCAmelCase ) _A = metrics.pop(F'''{metric_key_prefix}_runtime''' , _UpperCAmelCase ) _A = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , _UpperCAmelCase ) _A = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , _UpperCAmelCase ) _A = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , _UpperCAmelCase ) for k, v in metrics.items(): if k == F'''{metric_key_prefix}_loss''': _A = v else: _A = k.split('_' ) _A = ' '.join([part.capitalize() for part in splits[1:]] ) _A = v self.training_tracker.write_line(_UpperCAmelCase ) self.training_tracker.remove_child() _A = None # Evaluation takes a long time so we should force the next update. _A = True def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict , **_UpperCAmelCase : int ): self.training_tracker.update( state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=_UpperCAmelCase ) _A = None
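# Standalone usage sketch of NotebookProgressBar above. It assumes a
# Jupyter/IPython frontend (IPython.display has to render the HTML) and the
# `update(value, comment=...)` API implied by this file's own call sites
# (e.g. `self.prediction_bar.update(1)`):
if __name__ == "__main__":
    bar = NotebookProgressBar(100, prefix="Demo")
    for step in range(100):
        time.sleep(0.02)
        bar.update(step + 1, comment=f"step {step + 1}")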
"""
Bisection method: find a root of a continuous function on an interval whose
endpoints have opposite signs (Bolzano's theorem).
"""
from collections.abc import Callable


def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Return an x in [a, b] for which function(x) is (approximately) zero."""
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
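# A second usage sketch: the same routine applied to f2(x) = x**2 - 4,
# whose root x = 2 lies inside [1, 3] (f2(1) < 0 < f2(3)):
def f2(x: float) -> float:
    return x**2 - 4


if __name__ == "__main__":
    print(bisection(f2, 1, 3))  # converges to ~2.0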
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL a = logging.get_logger(__name__) def _snake_case ( _snake_case : np.ndarray , _snake_case : Union[int, Iterable[int]] , _snake_case : bool , _snake_case : int ) -> Tuple[int, int]: '''simple docstring''' def constraint_to_multiple_of(_snake_case : int , _snake_case : Tuple , _snake_case : Dict=0 , _snake_case : Union[str, Any]=None ): _A = round(val / multiple ) * multiple if max_val is not None and x > max_val: _A = math.floor(val / multiple ) * multiple if x < min_val: _A = math.ceil(val / multiple ) * multiple return x _A = (output_size, output_size) if isinstance(_snake_case , _snake_case ) else output_size _A , _A = get_image_size(_snake_case ) _A , _A = output_size # determine new height and width _A = output_height / input_height _A = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _A = scale_width else: # fit height _A = scale_height _A = constraint_to_multiple_of(scale_height * input_height , multiple=_snake_case ) _A = constraint_to_multiple_of(scale_width * input_width , multiple=_snake_case ) return (new_height, new_width) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''pixel_values'''] def __init__( self : str , _UpperCAmelCase : bool = True , _UpperCAmelCase : Dict[str, int] = None , _UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 1 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[int, float] = 1 / 255 , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , **_UpperCAmelCase : Union[str, Any] , ): super().__init__(**_UpperCAmelCase ) _A = size if size is not None else {'height': 384, 'width': 384} _A = get_size_dict(_UpperCAmelCase ) _A = do_resize _A = size _A = keep_aspect_ratio _A = ensure_multiple_of _A = resample _A = do_rescale _A = rescale_factor _A = do_normalize _A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _A = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Dict[str, int] , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 1 , _UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[Any] , ): _A = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. 
Got {size.keys()}''' ) _A = get_resize_output_image_size( _UpperCAmelCase , output_size=(size['height'], size['width']) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[int, float] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : Any , ): return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Union[float, List[float]] , _UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCAmelCase : List[str] , ): return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : ImageInput , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : int = None , _UpperCAmelCase : PILImageResampling = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : float = None , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[float, List[float]]] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCAmelCase : Any , ): _A = do_resize if do_resize is not None else self.do_resize _A = size if size is not None else self.size _A = get_size_dict(_UpperCAmelCase ) _A = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _A = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _A = resample if resample is not None else self.resample _A = do_rescale if do_rescale is not None else self.do_rescale _A = rescale_factor if rescale_factor is not None else self.rescale_factor _A = do_normalize if do_normalize is not None else self.do_normalize _A = image_mean if image_mean is not None else self.image_mean _A = image_std if image_std is not None else self.image_std _A = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. 
_A = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: _A = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: _A = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: _A = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] _A = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] _A = {'pixel_values': images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[Tuple] = None ): _A = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError( 'Make sure that you pass in as many target sizes as the batch dimension of the logits' ) if is_torch_tensor(_UpperCAmelCase ): _A = target_sizes.numpy() _A = [] for idx in range(len(_UpperCAmelCase ) ): _A = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_UpperCAmelCase ) _A = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_UpperCAmelCase ) else: _A = logits.argmax(dim=1 ) _A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
315
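The sizing rule in the row above scales toward a target size, optionally preserving aspect ratio, then snaps each side to a multiple of ensure_multiple_of. A standalone sketch of that rule (names are mine; only the default rounding path is shown):

import math

def fit_to_multiple(val: float, multiple: int, min_val: int = 0, max_val: int | None = None) -> int:
    # Round to the nearest multiple, falling back to floor/ceil at the bounds.
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = math.floor(val / multiple) * multiple
    if x < min_val:
        x = math.ceil(val / multiple) * multiple
    return x

# A 480x640 image resized toward 384x384, keeping aspect ratio, multiple of 32:
scale_height, scale_width = 384 / 480, 384 / 640  # 0.8, 0.6
# "scale as little as possible": pick the factor closest to 1, here the height factor.
scale = scale_width if abs(1 - scale_width) < abs(1 - scale_height) else scale_height
print(fit_to_multiple(scale * 480, 32), fit_to_multiple(scale * 640, 32))  # 384 512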
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
1
"""simple docstring""" import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : str , _UpperCAmelCase : Dict , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : int=7 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=99 , _UpperCAmelCase : List[Any]=32 , _UpperCAmelCase : List[Any]=5 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : List[Any]=37 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Any=16 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Any=4 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_attention_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_choices def lowerCAmelCase_ ( self : Dict ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_attention_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None if self.use_token_type_ids: _A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) _A = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCAmelCase_ ( self : Optional[int] ): _A = self.prepare_config_and_inputs() _A , _A , _A , _A = config_and_inputs _A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict def lowerCAmelCase_ ( self : int ): _A = self.prepare_config_and_inputs() _A , _A , _A , _A = config_and_inputs _A = True _A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) _A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with 
ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = True UpperCAmelCase : Dict = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self : Tuple ): _A = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): for model_class_name in self.all_model_classes: _A = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase ) _A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : str ): _A = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase ) _A = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) _A = model(_UpperCAmelCase )[0] _A = [1, 11, 50_265] self.assertEqual(list(output.shape ) , _UpperCAmelCase ) # compare the actual values for a slice. _A = np.array( [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=_UpperCAmelCase ) _A = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa ) _A = model(_UpperCAmelCase )[0] # compare the actual values for a slice. _A = np.array( [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=1E-4 ) )
315
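The slow tests above all rely on materializing Flax weights from a PyTorch checkpoint. The same call as a standalone snippet (running it downloads the checkpoint; the hidden size printed depends on the model config):

import numpy as np
from transformers import FlaxRobertaPreLayerNormModel

model = FlaxRobertaPreLayerNormModel.from_pretrained(
    "andreasmadsen/efficient_mlm_m0.40", from_pt=True  # convert PT weights on the fly
)
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs[0].shape)  # (1, 1, hidden_size)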
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
315
1
"""simple docstring""" from argparse import ArgumentParser from .env import EnvironmentCommand def _snake_case ( ) -> Optional[int]: '''simple docstring''' _A = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' ) _A = parser.add_subparsers(help='diffusers-cli command helpers' ) # Register commands EnvironmentCommand.register_subcommand(_snake_case ) # Let's go _A = parser.parse_args() if not hasattr(_snake_case , 'func' ): parser.print_help() exit(1 ) # Run _A = args.func(_snake_case ) service.run() if __name__ == "__main__": main()
315
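The row above is an argparse subcommand dispatcher; the same pattern works without the diffusers package. A minimal self-contained sketch (names are mine):

from argparse import ArgumentParser

def main() -> None:
    parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    commands = parser.add_subparsers(help="command helpers")

    # Each command registers a parser and binds a handler, as EnvironmentCommand does.
    env = commands.add_parser("env", help="print environment info")
    env.set_defaults(func=lambda args: print("env info here"))

    args = parser.parse_args()
    if not hasattr(args, "func"):  # no subcommand given
        parser.print_help()
        raise SystemExit(1)
    args.func(args)

if __name__ == "__main__":
    main()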
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
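The move table in the row above is the core of the tour search. The same eight knight offsets as a small runnable generator (names are mine):

def knight_moves(y: int, x: int, n: int) -> list[tuple[int, int]]:
    # All eight knight offsets, filtered to the n x n board.
    deltas = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
    return [(y + dy, x + dx) for dy, dx in deltas if 0 <= y + dy < n and 0 <= x + dx < n]

print(knight_moves(0, 0, 5))  # [(1, 2), (2, 1)] -- only two moves from a corner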
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def _snake_case ( _snake_case : str , _snake_case : str ) -> str | Literal[False]: '''simple docstring''' _A = list(_snake_case ) _A = list(_snake_case ) _A = 0 for i in range(len(_snake_case ) ): if lista[i] != lista[i]: count += 1 _A = '_' if count > 1: return False else: return "".join(_snake_case ) def _snake_case ( _snake_case : list[str] ) -> list[str]: '''simple docstring''' _A = [] while True: _A = ['$'] * len(_snake_case ) _A = [] for i in range(len(_snake_case ) ): for j in range(i + 1 , len(_snake_case ) ): _A = compare_string(binary[i] , binary[j] ) if k is False: _A = '*' _A = '*' temp.append('X' ) for i in range(len(_snake_case ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_snake_case ) == 0: return pi _A = list(set(_snake_case ) ) def _snake_case ( _snake_case : int , _snake_case : Sequence[float] ) -> list[str]: '''simple docstring''' _A = [] for minterm in minterms: _A = '' for _ in range(_snake_case ): _A = str(minterm % 2 ) + string minterm //= 2 temp.append(_snake_case ) return temp def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : int ) -> bool: '''simple docstring''' _A = list(_snake_case ) _A = list(_snake_case ) _A = 0 for i in range(len(_snake_case ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def _snake_case ( _snake_case : list[list[int]] , _snake_case : list[str] ) -> list[str]: '''simple docstring''' _A = [] _A = [0] * len(_snake_case ) for i in range(len(chart[0] ) ): _A = 0 _A = -1 for j in range(len(_snake_case ) ): if chart[j][i] == 1: count += 1 _A = j if count == 1: _A = 1 for i in range(len(_snake_case ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_snake_case ) ): _A = 0 temp.append(prime_implicants[i] ) while True: _A = 0 _A = -1 _A = 0 for i in range(len(_snake_case ) ): _A = chart[i].count(1 ) if count_n > max_n: _A = count_n _A = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_snake_case ) ): _A = 0 def _snake_case ( _snake_case : list[str] , _snake_case : list[str] ) -> list[list[int]]: '''simple docstring''' _A = [[0 for x in range(len(_snake_case ) )] for x in range(len(_snake_case ) )] for i in range(len(_snake_case ) ): _A = prime_implicants[i].count('_' ) for j in range(len(_snake_case ) ): if is_for_table(prime_implicants[i] , binary[j] , _snake_case ): _A = 1 return chart def _snake_case ( ) -> None: '''simple docstring''' _A = int(input('Enter the no. of variables\n' ) ) _A = [ float(_snake_case ) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split() ] _A = decimal_to_binary(_snake_case , _snake_case ) _A = check(_snake_case ) print('Prime Implicants are:' ) print(_snake_case ) _A = prime_implicant_chart(_snake_case , _snake_case ) _A = selection(_snake_case , _snake_case ) print('Essential Prime Implicants are:' ) print(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod() main()
315
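The first step of this Quine-McCluskey row, minterm to fixed-width bitstring, is easy to sanity check in isolation (names are mine):

def decimal_to_binary(no_of_variables: int, minterms: list[int]) -> list[str]:
    out = []
    for minterm in minterms:
        s = ""
        for _ in range(no_of_variables):  # emit exactly one bit per variable
            s = str(minterm % 2) + s
            minterm //= 2
        out.append(s)
    return out

print(decimal_to_binary(3, [1, 5, 7]))  # ['001', '101', '111']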
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
315
1
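Both overrides in the row above use the same stash/None/restore idiom so the inner loop never runs metrics on raw logits. The idiom in isolation, as a toy class (names are mine):

class Runner:
    def __init__(self, metric_fn):
        self.compute_metrics = metric_fn

    def evaluate(self, preds, labels):
        metric_fn = self.compute_metrics
        self.compute_metrics = None  # the inner loop must skip per-batch metrics
        try:
            raw = {"predictions": preds}  # stand-in for the prediction loop output
        finally:
            self.compute_metrics = metric_fn  # restored even if the loop raises
        return self.compute_metrics(raw["predictions"], labels)

r = Runner(lambda p, l: sum(x == y for x, y in zip(p, l)) / len(l))
print(r.evaluate([1, 0], [1, 1]))  # 0.5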
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def _snake_case ( _snake_case : Optional[Any] , _snake_case : Tuple , _snake_case : Dict , _snake_case : Union[str, Any]=None , _snake_case : str=None , _snake_case : Optional[Any]=None , _snake_case : Tuple=None , _snake_case : Dict=None , ) -> str: '''simple docstring''' if attention_mask is None: _A = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: _A = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: _A = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=_snake_case ) if decoder_head_mask is None: _A = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_snake_case ) if cross_attn_head_mask is None: _A = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=_snake_case ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class lowercase_ : '''simple docstring''' def __init__( self : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : List[Any]="relu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Union[str, Any]=0.0 , _UpperCAmelCase : List[str]=20 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : str=0 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = encoder_layerdrop _A = decoder_layerdrop _A = max_position_embeddings _A = eos_token_id _A = pad_token_id _A = bos_token_id def lowerCAmelCase_ ( self : Dict ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = self.eos_token_id # Eos Token _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids 
being off by num_pad_tokens in past input _A = input_ids.clamp(self.pad_token_id + 1 ) _A = decoder_input_ids.clamp(self.pad_token_id + 1 ) _A = self.get_config() _A = prepare_mam_aaa_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def lowerCAmelCase_ ( self : Dict ): return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def lowerCAmelCase_ ( self : List[str] ): _A , _A = self.prepare_config_and_inputs() return config, inputs_dict def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Any ): _A = MaMaaaModel(config=_UpperCAmelCase ).get_decoder().to(_UpperCAmelCase ).eval() _A = inputs_dict['input_ids'] _A = inputs_dict['attention_mask'] _A = inputs_dict['head_mask'] # first forward pass _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , head_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) _A , _A = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids _A = ids_tensor((self.batch_size, 3) , config.vocab_size ) _A = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and _A = torch.cat([input_ids, next_tokens] , dim=-1 ) _A = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )['last_hidden_state'] _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[ 'last_hidden_state' ] # select random slice _A = ids_tensor((1,) , output_from_past.shape[-1] ).item() _A = output_from_no_past[:, -3:, random_slice_idx].detach() _A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ): _A = MaMaaaModel(config=_UpperCAmelCase ).to(_UpperCAmelCase ).eval() _A = model(**_UpperCAmelCase ) _A = outputs.encoder_last_hidden_state _A = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: _A = model.get_encoder() encoder.save_pretrained(_UpperCAmelCase ) _A = MaMaaaEncoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase ) _A = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 ) with tempfile.TemporaryDirectory() as tmpdirname: _A = model.get_decoder() decoder.save_pretrained(_UpperCAmelCase ) _A = MaMaaaDecoder.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase ) _A = decoder( input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=inputs_dict['attention_mask'] , )[0] self.parent.assertTrue((last_hidden_state_a - 
last_hidden_state).abs().max().item() < 1E-3 ) @require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Tuple = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) UpperCAmelCase : Dict = (MaMaaaForConditionalGeneration,) if is_torch_available() else () UpperCAmelCase : int = ( { '''conversational''': MaMaaaForConditionalGeneration, '''feature-extraction''': MaMaaaModel, '''summarization''': MaMaaaForConditionalGeneration, '''text2text-generation''': MaMaaaForConditionalGeneration, '''translation''': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) UpperCAmelCase : int = True UpperCAmelCase : List[Any] = True UpperCAmelCase : int = False UpperCAmelCase : Dict = False def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def lowerCAmelCase_ ( self : Tuple ): _A = MaMaaaModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Optional[int] ): _A , _A = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_UpperCAmelCase ) _A , _A = model_class.from_pretrained(_UpperCAmelCase , output_loading_info=_UpperCAmelCase ) self.assertEqual(info['missing_keys'] , [] ) def lowerCAmelCase_ ( self : str ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): _A = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = copy.deepcopy(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) if not self.is_encoder_decoder: _A = inputs['input_ids'] del inputs["input_ids"] else: _A = inputs['input_ids'] _A = inputs.get('decoder_input_ids' , _UpperCAmelCase ) del inputs["input_ids"] inputs.pop('decoder_input_ids' , _UpperCAmelCase ) _A = model.get_input_embeddings() if not self.is_encoder_decoder: _A = wte(_UpperCAmelCase ) else: _A = wte(_UpperCAmelCase ) _A = wte(_UpperCAmelCase ) with torch.no_grad(): model(**_UpperCAmelCase )[0] def lowerCAmelCase_ ( self : Tuple ): _A , _A = self.model_tester.prepare_config_and_inputs() _A = input_dict['input_ids'] _A = input_ids.ne(1 ).to(_UpperCAmelCase ) _A = MaMaaaForConditionalGeneration(_UpperCAmelCase ).eval().to(_UpperCAmelCase ) if torch_device == "cuda": model.half() model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ) model.generate(num_beams=4 , do_sample=_UpperCAmelCase , early_stopping=_UpperCAmelCase , num_return_sequences=3 ) def _snake_case ( _snake_case : Optional[Any] ) -> str: 
'''simple docstring''' return torch.tensor(_snake_case , dtype=torch.long , device=_snake_case ) a = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Dict ): return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' ) def lowerCAmelCase_ ( self : List[Any] ): _A = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase ) _A = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) _A = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) _A = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) with torch.no_grad(): _A = model(**_UpperCAmelCase )[0] _A = torch.Size((1, 11, 1_024) ) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here _A = torch.tensor( [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=_UpperCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[str] ): _A = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase ) # change to intended input _A = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) _A = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) _A = prepare_mam_aaa_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) with torch.no_grad(): _A = model(**_UpperCAmelCase )[0] _A = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here _A = torch.tensor( [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=_UpperCAmelCase ) self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(_UpperCAmelCase ) _A = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' ) _A = [ 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent' ' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de' ' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.', ] # The below article tests that we don't add any hypotheses outside of the top n_beams _A = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors='pt' ) _A = model.generate( input_ids=dct['input_ids'].to(_UpperCAmelCase ) , attention_mask=dct['attention_mask'].to(_UpperCAmelCase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , ) _A = [ 'The NSA case highlights the total absence of intelligence debate', 'I think there are two levels of response from the French government.', 'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.' ' Ambassador, they respond to a real discovery, which is that of the scale of U.S. 
surveillance on all' ' communications in France.', ] _A = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) assert generated == expected_en
315
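prepare_mam_aaa_inputs_dict in the row above derives every missing mask from the pad token. That convention, in isolation:

import torch

pad_token_id = 1
input_ids = torch.tensor([[2, 5, 7, 1, 1]])
attention_mask = input_ids.ne(pad_token_id)  # True everywhere except pad positions
print(attention_mask.tolist())  # [[True, True, True, False, False]]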
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
315
1
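The cell's identifier normalization collapsed the two parameter names, so the XOR compares a value with itself. The intended check, with distinct names: two integers have opposite signs exactly when the sign bit of their XOR is set.

def different_signs(a: int, b: int) -> bool:
    # (a ^ b) is negative iff the sign bits of a and b differ.
    return (a ^ b) < 0

print(different_signs(3, -7), different_signs(-3, -7), different_signs(3, 7))  # True False False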
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
315
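The row above defers heavy submodule imports via _LazyModule. A minimal sketch of the same deferral idea using PEP 562 module __getattr__ (this is not transformers' real class; structure and names here are illustrative only). It would live in a package's __init__.py:

_IMPORT_STRUCTURE = {"configuration_xlm_roberta_xl": ["XLMRobertaXLConfig"]}

def __getattr__(name):
    # Called only when `name` is not already a module attribute, so the
    # submodule (and torch behind it) loads on first access, not at import time.
    import importlib
    for module, names in _IMPORT_STRUCTURE.items():
        if name in names:
            return getattr(importlib.import_module(f".{module}", __name__), name)
    raise AttributeError(name)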
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
315
1
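The expected determinant of -5 in the test suite above can be cross-checked independently with numpy (an aside, not part of the suite):

import numpy as np

a = np.array([[1, 2, 3], [2, 4, 5], [6, 7, 8]])
print(round(float(np.linalg.det(a))))  # -5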
"""simple docstring""" import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu a = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: a = json.load(f) @require_torch class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Dict ): return FSMTTokenizer.from_pretrained(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] ): _A = FSMTForConditionalGeneration.from_pretrained(_UpperCAmelCase ).to(_UpperCAmelCase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality _A = F'''facebook/wmt19-{pair}''' _A = self.get_tokenizer(_UpperCAmelCase ) _A = self.get_model(_UpperCAmelCase ) _A = bleu_data[pair]['src'] _A = bleu_data[pair]['tgt'] _A = tokenizer(_UpperCAmelCase , return_tensors='pt' , truncation=_UpperCAmelCase , padding='longest' ).to(_UpperCAmelCase ) _A = model.generate( input_ids=batch.input_ids , num_beams=8 , ) _A = tokenizer.batch_decode( _UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase ) _A = calculate_bleu(_UpperCAmelCase , _UpperCAmelCase ) print(_UpperCAmelCase ) self.assertGreaterEqual(scores['bleu'] , _UpperCAmelCase )
315
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
315
1
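The config above enforces that d_model divides evenly by n_head and derives the per-head size. A quick check on the defaults in the row (an aside):

d_model, n_head = 1_024, 16
assert d_model % n_head == 0  # the same consistency rule the config raises on
print(d_model // n_head)  # 64, the derived d_head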
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def _snake_case ( _snake_case : int , _snake_case : List[str] , _snake_case : Optional[Any] ) -> Tuple: '''simple docstring''' if gpta_config_file == "": _A = GPTaConfig() else: _A = GPTaConfig.from_json_file(_snake_case ) _A = GPTaModel(_snake_case ) # Load weights from numpy load_tf_weights_in_gpta(_snake_case , _snake_case , _snake_case ) # Save pytorch-model _A = pytorch_dump_folder_path + '/' + WEIGHTS_NAME _A = pytorch_dump_folder_path + '/' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict() , _snake_case ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(_snake_case , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--gpt2_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained OpenAI model. \n''' '''This specifies the model architecture.''' ), ) a = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
315
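A hypothetical invocation of the conversion script above, using the flags its argparse section defines. The script filename and both paths are placeholders, not values from the source:

import subprocess

subprocess.run(
    [
        "python", "convert_gpt2_original_tf_checkpoint_to_pytorch.py",  # assumed filename
        "--gpt2_checkpoint_path", "/path/to/tf_ckpt",
        "--pytorch_dump_folder_path", "/path/to/out",
        # omitting --gpt2_config_file falls back to a default GPT2Config, per the script
    ],
    check=True,
)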
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' 'See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite''' ' itUse `--force` if you want to overwrite it' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.' 
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
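The training loop itself lives in `Distiller` and is not shown here. As a rough sketch of what the `--alpha_*` flags configure, a temperature-based soft-target loss combined linearly with the other objectives could look like the following; `distillation_loss`, `total_loss`, and `temperature` are illustrative names, not the actual Distiller internals.

# Minimal sketch of the alpha-weighted distillation objective, assuming the
# standard soft-target KL formulation with a temperature.
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, temperature=2.0):
    # KL divergence between tempered student and teacher distributions;
    # the T**2 factor keeps gradient magnitudes comparable across temperatures.
    return F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature ** 2)

def total_loss(args, loss_ce, loss_lm, loss_mse=0.0, loss_cos=0.0):
    # Linear combination controlled by the CLI flags parsed above.
    alpha_lm = args.alpha_mlm if args.mlm else args.alpha_clm
    return args.alpha_ce * loss_ce + alpha_lm * loss_lm + args.alpha_mse * loss_mse + args.alpha_cos * loss_cos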
315
1
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch a = random.Random() def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Optional[int]=1.0 , _snake_case : List[str]=None , _snake_case : str=None ) -> List[Any]: '''simple docstring''' if rng is None: _A = global_rng _A = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : List[Any]=7 , _UpperCAmelCase : Optional[int]=400 , _UpperCAmelCase : Tuple=2_000 , _UpperCAmelCase : List[Any]=10 , _UpperCAmelCase : Optional[int]=160 , _UpperCAmelCase : Optional[Any]=8 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : Optional[int]=4_000 , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : Optional[int]=True , ): _A = parent _A = batch_size _A = min_seq_length _A = max_seq_length _A = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _A = padding_value _A = sampling_rate _A = return_attention_mask _A = do_normalize _A = feature_size _A = chunk_length _A = hop_length def lowerCAmelCase_ ( self : Tuple ): return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : List[str]=False ): def _flatten(_UpperCAmelCase : str ): return list(itertools.chain(*_UpperCAmelCase ) ) if equal_length: _A = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _A = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _A = [np.asarray(_UpperCAmelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Tuple = WhisperFeatureExtractor if is_speech_available() else None def lowerCAmelCase_ ( self : Optional[Any] ): _A = WhisperFeatureExtractionTester(self ) def lowerCAmelCase_ ( self : Optional[int] ): _A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _A = feat_extract_first.save_pretrained(_UpperCAmelCase )[0] check_json_file_has_correct_format(_UpperCAmelCase ) _A = self.feature_extraction_class.from_pretrained(_UpperCAmelCase ) _A = feat_extract_first.to_dict() _A = feat_extract_second.to_dict() _A = feat_extract_first.mel_filters _A = feat_extract_second.mel_filters self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : 
Optional[Any] ): _A = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _A = os.path.join(_UpperCAmelCase , 'feat_extract.json' ) feat_extract_first.to_json_file(_UpperCAmelCase ) _A = self.feature_extraction_class.from_json_file(_UpperCAmelCase ) _A = feat_extract_first.to_dict() _A = feat_extract_second.to_dict() _A = feat_extract_first.mel_filters _A = feat_extract_second.mel_filters self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): # Tests that all calls wrap to encode_plus and batch_encode_plus _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 _A = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] _A = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] # Test feature size _A = feature_extractor(_UpperCAmelCase , padding='max_length' , return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input _A = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features _A = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test batched _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
_A = [floats_list((1, x) )[0] for x in (800, 800, 800)] _A = np.asarray(_UpperCAmelCase ) _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) # Test truncation required _A = [floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )] _A = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs] _A = [x[: feature_extractor.n_samples] for x in speech_inputs] _A = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs_truncated] _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features _A = feature_extractor(_UpperCAmelCase , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) ) def lowerCAmelCase_ ( self : Dict ): import torch _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = np.random.rand(100 , 32 ).astype(np.floataa ) _A = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _A = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) _A = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Dict ): _A = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech _A = ds.sort('id' ).select(range(_UpperCAmelCase ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def lowerCAmelCase_ ( self : List[Any] ): # fmt: off _A = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on _A = self._load_datasamples(1 ) _A = WhisperFeatureExtractor() _A = feature_extractor(_UpperCAmelCase , return_tensors='pt' ).input_features self.assertEqual(input_features.shape , (1, 80, 3_000) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , _UpperCAmelCase , atol=1E-4 ) ) def lowerCAmelCase_ ( self : List[str] ): _A = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) _A = self._load_datasamples(1 )[0] _A = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue _A = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=_UpperCAmelCase )[0] self.assertTrue(np.all(np.mean(_UpperCAmelCase ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(_UpperCAmelCase ) - 1 ) < 1E-3 ) )
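For reference, the extractor exercised by these tests is used along these lines outside the harness; the random waveform below merely stands in for real 16 kHz mono audio.

# Typical end-to-end use of the feature extractor under test.
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
waveform = np.random.randn(16_000).astype(np.float32)  # 1 second at 16 kHz
inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="np")
print(inputs.input_features.shape)  # (1, 80, 3000): 80 mel bins, 30 s of frames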
315
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
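Such a `Scene` subclass is normally rendered through the manim CLI, but Manim Community also supports programmatic rendering. A sketch, assuming the scene class above were given a readable name such as `ModelOffloadScene` (a hypothetical name, not one defined in this file):

# Programmatic rendering sketch with Manim Community.
from manim import config

config.quality = "low_quality"   # roughly the CLI's -ql flag, for fast previews
scene = ModelOffloadScene()      # hypothetical name for the class defined above
scene.render()                   # writes the animation under media/videos/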
315
1
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
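The `axpy` helper imported above follows BLAS naming: scalar times x, plus y. A minimal reference implementation consistent with the assertion `axpy(2, (1,2,3), (1,0,1)) == (3,4,7)` could be:

# Reference sketch of the BLAS-style `axpy` the test exercises, written against
# the same Vector API (scalar multiplication and addition) asserted above.
def axpy(scalar, x, y):
    return x * scalar + y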
315
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" def _snake_case ( _snake_case : list[int] ) -> list[int]: '''simple docstring''' _A = len(_snake_case ) for i in range(_snake_case ): for j in range(i + 1 , _snake_case ): if numbers[j] < numbers[i]: _A , _A = numbers[j], numbers[i] return numbers if __name__ == "__main__": a = input('''Enter numbers separated by a comma:\n''').strip() a = [int(item) for item in user_input.split(''',''')] print(exchange_sort(unsorted))
315
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
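The class above mirrors transformers' SpeechT5FeatureExtractor, which exposes two paths: raw waveforms via `audio` and log-mel target spectrograms via `audio_target`. A hedged usage sketch, assuming that public class; the shapes in the comments are approximate.

# Waveform inputs versus mel-spectrogram targets with the SpeechT5-style API.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
speech = np.random.randn(16_000).astype(np.float32)  # 1 second at 16 kHz

inputs = extractor(audio=speech, sampling_rate=16_000, return_tensors="np")
targets = extractor(audio_target=speech, sampling_rate=16_000, return_tensors="np")
print(inputs.input_values.shape)   # (1, 16000): the (normalized) waveform
print(targets.input_values.shape)  # (1, num_frames, 80): log-mel features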
315
1
"""simple docstring""" from __future__ import annotations class lowercase_ : '''simple docstring''' def __init__( self : str , _UpperCAmelCase : list[list[int]] ): _A = TypeError( 'Matrices must be formed from a list of zero or more lists containing at ' 'least one and the same number of values, each of which must be of type ' 'int or float.' ) if len(_UpperCAmelCase ) != 0: _A = len(rows[0] ) if cols == 0: raise error for row in rows: if len(_UpperCAmelCase ) != cols: raise error for value in row: if not isinstance(_UpperCAmelCase , (int, float) ): raise error _A = rows else: _A = [] def lowerCAmelCase_ ( self : Optional[int] ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def lowerCAmelCase_ ( self : Tuple ): return len(self.rows ) @property def lowerCAmelCase_ ( self : Optional[Any] ): return len(self.rows[0] ) @property def lowerCAmelCase_ ( self : int ): return (self.num_rows, self.num_columns) @property def lowerCAmelCase_ ( self : Tuple ): return self.order[0] == self.order[1] def lowerCAmelCase_ ( self : int ): _A = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Any ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def lowerCAmelCase_ ( self : str ): return bool(self.determinant() ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : int ): _A = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(_UpperCAmelCase ).determinant() def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ): if (row + column) % 2 == 0: return self.get_minor(_UpperCAmelCase , _UpperCAmelCase ) return -1 * self.get_minor(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): return Matrix( [ [self.get_minor(_UpperCAmelCase , _UpperCAmelCase ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def lowerCAmelCase_ ( self : Optional[Any] ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def lowerCAmelCase_ ( self : int ): _A = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.determinant() if not determinant: raise TypeError('Only matrices with a non-zero determinant have an inverse' ) return self.adjugate() * (1 / determinant) def __repr__( self : Tuple ): return str(self.rows ) def __str__( self : Optional[int] ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '[' + '. 
'.join([str(_UpperCAmelCase ) for value in row] ) + '.]' for row in self.rows ] ) + "]" ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int | None = None ): _A = TypeError('Row must be a list containing all ints and/or floats' ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise type_error for value in row: if not isinstance(_UpperCAmelCase , (int, float) ): raise type_error if len(_UpperCAmelCase ) != self.num_columns: raise ValueError( 'Row must be equal in length to the other rows in the matrix' ) if position is None: self.rows.append(_UpperCAmelCase ) else: _A = self.rows[0:position] + [row] + self.rows[position:] def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int | None = None ): _A = TypeError( 'Column must be a list containing all ints and/or floats' ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise type_error for value in column: if not isinstance(_UpperCAmelCase , (int, float) ): raise type_error if len(_UpperCAmelCase ) != self.num_rows: raise ValueError( 'Column must be equal in length to the other columns in the matrix' ) if position is None: _A = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: _A = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self : Optional[int] , _UpperCAmelCase : object ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): return NotImplemented return self.rows == other.rows def __ne__( self : Optional[int] , _UpperCAmelCase : object ): return not self == other def __neg__( self : int ): return self * -1 def __add__( self : int , _UpperCAmelCase : Matrix ): if self.order != other.order: raise ValueError('Addition requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self : str , _UpperCAmelCase : Matrix ): if self.order != other.order: raise ValueError('Subtraction requires matrices of the same order' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self : Union[str, Any] , _UpperCAmelCase : Matrix | int | float ): if isinstance(_UpperCAmelCase , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(_UpperCAmelCase , _UpperCAmelCase ): if self.num_columns != other.num_rows: raise ValueError( 'The number of columns in the first matrix must ' 'be equal to the number of rows in the second' ) return Matrix( [ [Matrix.dot_product(_UpperCAmelCase , _UpperCAmelCase ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( 'A Matrix can only be multiplied by an int, float, or another matrix' ) def __pow__( self : Optional[Any] , _UpperCAmelCase : int ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise TypeError('A Matrix can only be raised to the power of an int' ) if not self.is_square: raise ValueError('Only square matrices can be raised to a power' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( 'Only invertable matrices can be raised to a negative power' ) _A = self for _ in range(other - 1 ): result *= self return result @classmethod def lowerCAmelCase_ ( cls : Dict , _UpperCAmelCase : list[int] , _UpperCAmelCase : list[int] ): return sum(row[i] * column[i] for i in 
range(len(_UpperCAmelCase ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
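Assuming the obfuscated method names above are restored to the ones the class calls internally (`determinant`, `inverse`, `identity`, `columns`, ...), typical usage looks like this; a matrix with an integer inverse is chosen deliberately, because scalar multiplication truncates each entry with `int(...)`.

# Example usage of the Matrix class defined above, under the assumption that
# the method names match the internal calls (determinant, inverse, identity).
m = Matrix([[2, 3], [3, 5]])
print(m.determinant())                  # 1
print(m.inverse())                      # rows [[5, -3], [-3, 2]]
print(m * m.inverse() == m.identity())  # True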
315
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
315
1
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''', }, '''merges_file''': { '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''Salesforce/codegen-350M-mono''': ( '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json''' ), }, } a = { '''Salesforce/codegen-350M-mono''': 2_048, } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = VOCAB_FILES_NAMES UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Union[str, Any] = ['''input_ids''', '''attention_mask'''] UpperCAmelCase : Dict = CodeGenTokenizer def __init__( self : Dict , _UpperCAmelCase : str=None , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Any="<|endoftext|>" , _UpperCAmelCase : Dict="<|endoftext|>" , _UpperCAmelCase : Tuple="<|endoftext|>" , _UpperCAmelCase : Any=False , **_UpperCAmelCase : int , ): super().__init__( _UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) if kwargs.pop('add_bos_token' , _UpperCAmelCase ): _A = kwargs.pop('name_or_path' , '' ) raise ValueError( 'Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.' 'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n' F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n''' F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n''' 'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.' ' so that the fast tokenizer works correctly.' ) _A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space: _A = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) ) _A = add_prefix_space _A = pre_tok_class(**_UpperCAmelCase ) _A = add_prefix_space def lowerCAmelCase_ ( self : Any , *_UpperCAmelCase : Any , **_UpperCAmelCase : int ): _A = kwargs.get('is_split_into_words' , _UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Any ): _A = kwargs.get('is_split_into_words' , _UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): _A = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = None , _UpperCAmelCase : Optional[List[str]] = None , **_UpperCAmelCase : Dict , ): _A = super().decode( token_ids=_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase , clean_up_tokenization_spaces=_UpperCAmelCase , **_UpperCAmelCase , ) if truncate_before_pattern is not None and len(_UpperCAmelCase ) > 0: _A = self.truncate(_UpperCAmelCase , _UpperCAmelCase ) return decoded_text def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Any ): def find_re(_UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any] ): _A = pattern.search(_UpperCAmelCase , _UpperCAmelCase ) return m.start() if m else -1 _A = [re.compile(_UpperCAmelCase , re.MULTILINE ) for pattern in truncate_before_pattern] _A = list(re.finditer('^print' , _UpperCAmelCase , re.MULTILINE ) ) if len(_UpperCAmelCase ) > 1: _A = completion[: prints[1].start()] _A = list(re.finditer('^def' , _UpperCAmelCase , re.MULTILINE ) ) if len(_UpperCAmelCase ) > 1: _A = completion[: defs[1].start()] _A = 0 _A = [ pos for pos in [find_re(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for terminal in terminals] if pos != -1 ] if len(_UpperCAmelCase ) > 0: return completion[: min(_UpperCAmelCase )] else: return completion
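A hypothetical use of the pattern-based truncation that `decode` implements above; the patterns shown are illustrative, and the truncation fires at the earliest match of any compiled pattern (or of a second top-level `print`/`def`).

# Decoding with truncate_before_pattern: generation is cut at the first
# blank-line run, leaving only the initial function definition.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
ids = tokenizer("def hello():\n    print('hi')\n\n\nprint('trailing')").input_ids
text = tokenizer.decode(ids, truncate_before_pattern=["\n\n\n"])
# `text` now ends right after print('hi'), before the trailing statement.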
315
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
315
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) a = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''DeiTFeatureExtractor'''] a = ['''DeiTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''DeiTForImageClassification''', '''DeiTForImageClassificationWithTeacher''', '''DeiTForMaskedImageModeling''', '''DeiTModel''', '''DeiTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ '''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFDeiTForImageClassification''', '''TFDeiTForImageClassificationWithTeacher''', '''TFDeiTForMaskedImageModeling''', '''TFDeiTModel''', '''TFDeiTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
315
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
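A sketch of driving a block like the first one above with Flax's init/apply pattern; `CrossAttnDownBlock` is a hypothetical readable name for that class, and the NHWC shapes and embedding sizes are assumptions, not values taken from this file.

# Init/apply sketch for a cross-attention down block, assuming the
# diffusers-flax NHWC layout and an arbitrary timestep-embedding width.
import jax
import jax.numpy as jnp

block = CrossAttnDownBlock(in_channels=32, out_channels=32)
hidden = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
temb = jnp.zeros((1, 128))           # timestep embedding
context = jnp.zeros((1, 77, 32))     # encoder hidden states

params = block.init(jax.random.PRNGKey(0), hidden, temb, context)
hidden, skips = block.apply(params, hidden, temb, context)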
315
1
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
315
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
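For readers untangling the obfuscated names above, a standalone sketch of the same 3-4-3-1 feed-forward path; the layer sizes follow the constructor, while the rng seed and the sample are arbitrary.

import numpy

def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))

rng = numpy.random.default_rng(0)
x = numpy.array([[0.0, 1.0, 1.0]])  # one 3-feature sample, as in example()
w1 = rng.random((3, 4))             # input layer -> first hidden layer (4 nodes)
w2 = rng.random((4, 3))             # first hidden -> second hidden (3 nodes)
w3 = rng.random((3, 1))             # second hidden -> single output node
h1 = sigmoid(x @ w1)
h2 = sigmoid(h1 @ w2)
y = sigmoid(h2 @ w3)
print(y.shape)  # (1, 1)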
315
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : List[str] = LDMTextToImagePipeline UpperCAmelCase : Dict = TEXT_TO_IMAGE_PARAMS - { '''negative_prompt''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', '''prompt_embeds''', } UpperCAmelCase : List[Any] = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''callback''', '''callback_steps''', } UpperCAmelCase : Any = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase : Tuple = False def lowerCAmelCase_ ( self : List[str] ): torch.manual_seed(0 ) _A = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) _A = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) _A = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) _A = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) _A = CLIPTextModel(_UpperCAmelCase ) _A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _A = { 'unet': unet, 'scheduler': scheduler, 'vqvae': vae, 'bert': text_encoder, 'tokenizer': tokenizer, } return components def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=0 ): if str(_UpperCAmelCase ).startswith('mps' ): _A = torch.manual_seed(_UpperCAmelCase ) else: _A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : Any ): _A = 'cpu' # ensure determinism for the device-dependent torch.Generator _A = self.get_dummy_components() _A = LDMTextToImagePipeline(**_UpperCAmelCase ) pipe.to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = self.get_dummy_inputs(_UpperCAmelCase ) _A = pipe(**_UpperCAmelCase ).images _A = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) _A = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : 
int=torch.floataa , _UpperCAmelCase : str=0 ): _A = torch.manual_seed(_UpperCAmelCase ) _A = np.random.RandomState(_UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) _A = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : List[str] ): _A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = self.get_inputs(_UpperCAmelCase ) _A = pipe(**_UpperCAmelCase ).images _A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) _A = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] ) _A = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Tuple ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : int=torch.floataa , _UpperCAmelCase : str=0 ): _A = torch.manual_seed(_UpperCAmelCase ) _A = np.random.RandomState(_UpperCAmelCase ).standard_normal((1, 4, 32, 32) ) _A = torch.from_numpy(_UpperCAmelCase ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ) _A = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def lowerCAmelCase_ ( self : Optional[Any] ): _A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_UpperCAmelCase ) pipe.set_progress_bar_config(disable=_UpperCAmelCase ) _A = self.get_inputs(_UpperCAmelCase ) _A = pipe(**_UpperCAmelCase ).images[0] _A = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) _A = np.abs(expected_image - image ).max() assert max_diff < 1E-3
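The determinism recipe these tests rely on, in isolation: NumPy's RandomState seeds the latents and a torch Generator seeds the pipeline's own sampling (device placement omitted for brevity).

import numpy as np
import torch

seed = 0
latents = torch.from_numpy(
    np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
).to(dtype=torch.float32)            # same latent shape the get_inputs helpers build
generator = torch.manual_seed(seed)  # passed to the pipeline as `generator`
print(latents.shape, latents.dtype)  # torch.Size([1, 4, 32, 32]) torch.float32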
315
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
1
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup a = { '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''' } def _snake_case ( _snake_case : str = "dhaka" , _snake_case : int = 5 ) -> int: '''simple docstring''' _A = min(_snake_case , 50 ) # Prevent abuse! _A = { 'q': query, 'tbm': 'isch', 'hl': 'en', 'ijn': '0', } _A = requests.get('https://www.google.com/search' , params=_snake_case , headers=_snake_case ) _A = BeautifulSoup(html.text , 'html.parser' ) _A = ''.join( re.findall(R'AF_initDataCallback\(([^<]+)\);' , str(soup.select('script' ) ) ) ) _A = json.dumps(_snake_case ) _A = json.loads(_snake_case ) _A = re.findall( R'\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",' , _snake_case , ) if not matched_google_image_data: return 0 _A = re.sub( R'\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]' , '' , str(_snake_case ) , ) _A = re.findall( R'(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]' , _snake_case , ) for index, fixed_full_res_image in enumerate(_snake_case ): if index >= max_images: return index _A = bytes(_snake_case , 'ascii' ).decode( 'unicode-escape' ) _A = bytes(_snake_case , 'ascii' ).decode( 'unicode-escape' ) _A = urllib.request.build_opener() _A = [ ( 'User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36' ' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582', ) ] urllib.request.install_opener(_snake_case ) _A = F'''query_{query.replace(" " , "_" )}''' if not os.path.exists(_snake_case ): os.makedirs(_snake_case ) urllib.request.urlretrieve( # noqa: S310 _snake_case , F'''{path_name}/original_size_img_{index}.jpg''' ) return index if __name__ == "__main__": try: a = download_images_from_google_query(sys.argv[1]) print(F'''{image_count} images were downloaded to disk.''') except IndexError: print('''Please provide a search term.''') raise
315
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
1
"""simple docstring""" a = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def _snake_case ( _snake_case : dict , _snake_case : Any , _snake_case : int ) -> list[str]: '''simple docstring''' _A = set() # keep track of all the paths to be checked _A = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue _A = queue.pop(0 ) # get the last node from the path _A = path[-1] if node not in explored: _A = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: _A = list(_snake_case ) new_path.append(_snake_case ) queue.append(_snake_case ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(_snake_case ) # in case there's no path between the 2 nodes return [] def _snake_case ( _snake_case : dict , _snake_case : Union[str, Any] , _snake_case : int ) -> int: '''simple docstring''' if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 _A = [start] _A = set(_snake_case ) # Keep tab on distances from `start` node. _A = {start: 0, target: -1} while queue: _A = queue.pop(0 ) if node == target: _A = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(_snake_case ) queue.append(_snake_case ) _A = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
315
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
1
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
315
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
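Two pieces of the script above, isolated: the collator that stacks per-example pixel tensors into one batch, and the absolute-learning-rate scaling computed before the Trainer is built (shapes and numbers are illustrative).

import torch

def collate_fn(examples):
    # Stack per-example CHW tensors into one BCHW batch, as the Trainer expects.
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {'pixel_values': pixel_values}

batch = collate_fn([{'pixel_values': torch.zeros(3, 224, 224)} for _ in range(4)])
print(batch['pixel_values'].shape)  # torch.Size([4, 3, 224, 224])

# absolute_lr = base_lr * total_batch_size / 256
base_lr, per_device_bs, grad_accum, world_size = 1e-3, 32, 2, 4
total_train_batch_size = per_device_bs * grad_accum * world_size
print(base_lr * total_train_batch_size / 256)  # 0.001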
315
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging if TYPE_CHECKING: from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase a = logging.get_logger(__name__) a = { '''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''', '''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''', '''allenai/longformer-large-4096-finetuned-triviaqa''': ( '''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json''' ), '''allenai/longformer-base-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json''' ), '''allenai/longformer-large-4096-extra.pos.embd.only''': ( '''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json''' ), } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[str] = '''longformer''' def __init__( self : int , _UpperCAmelCase : Union[List[int], int] = 512 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 30_522 , _UpperCAmelCase : int = 768 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 12 , _UpperCAmelCase : int = 3_072 , _UpperCAmelCase : str = "gelu" , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : int = 512 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1E-1_2 , _UpperCAmelCase : bool = False , **_UpperCAmelCase : int , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _A = attention_window _A = sep_token_id _A = bos_token_id _A = eos_token_id _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = hidden_act _A = intermediate_size _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = initializer_range _A = layer_norm_eps _A = onnx_export class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : List[Any] , _UpperCAmelCase : "PretrainedConfig" , _UpperCAmelCase : str = "default" , _UpperCAmelCase : "List[PatchingSpec]" = None ): super().__init__(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = True @property def lowerCAmelCase_ ( self : List[str] ): if self.task == "multiple-choice": _A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('global_attention_mask', dynamic_axis), ] ) @property def lowerCAmelCase_ ( self : List[str] ): _A = super().outputs if self.task == "default": _A = {0: 'batch'} return outputs @property def lowerCAmelCase_ ( self : List[str] ): return 1E-4 @property def lowerCAmelCase_ ( self : int ): # needs to be >= 14 to support tril operator return max(super().default_onnx_opset , 14 ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : "PreTrainedTokenizerBase" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , ): _A = super().generate_dummy_inputs( preprocessor=_UpperCAmelCase , 
batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase ) import torch # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64) # makes the export fail randomly _A = torch.zeros_like(inputs['input_ids'] ) # make every second token global _A = 1 return inputs
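The global attention mask built at the end of generate_dummy_inputs, in isolation: zeros everywhere, then every second token marked global.

import torch

input_ids = torch.ones(2, 8, dtype=torch.long)      # dummy batch of 2 x 8 tokens
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1                   # every second token is global
print(global_attention_mask[0])  # tensor([1, 0, 1, 0, 1, 0, 1, 0])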
315
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
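The calibration toggle at the heart of the helpers above, as a minimal sketch; it assumes pytorch-quantization is installed, and the TensorQuantizer calls mirror the ones the functions above already use.

import torch
import pytorch_quantization.nn as quant_nn

model = torch.nn.Sequential(quant_nn.QuantLinear(8, 8))
for name, module in model.named_modules():
    if name.endswith('_quantizer'):
        if module._calibrator is not None:
            module.disable_quant()  # stop fake-quantizing
            module.enable_calib()   # start collecting amax statistics
        else:
            module.disable()
model(torch.randn(2, 8))  # forward passes now feed the calibrators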
315
1
"""simple docstring""" class lowercase_ : '''simple docstring''' def __init__( self : Optional[Any] ): _A = {} def lowerCAmelCase_ ( self : Dict ): print(self.vertex ) for i in self.vertex: print(_UpperCAmelCase , ' -> ' , ' -> '.join([str(_UpperCAmelCase ) for j in self.vertex[i]] ) ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ): # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(_UpperCAmelCase ) else: # else make a new vertex _A = [to_vertex] def lowerCAmelCase_ ( self : int ): # visited array for storing already visited nodes _A = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : list ): # mark start vertex as visited _A = True print(_UpperCAmelCase , end=' ' ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(_UpperCAmelCase , _UpperCAmelCase ) if __name__ == "__main__": a = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print('''DFS:''') g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
315
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
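What the metric wrapper above computes, called on scipy directly; the numbers match the docstring examples.

from scipy.stats import spearmanr

references = [1, 2, 3, 4, 5]
predictions = [10, 9, 2.5, 6, 4]
rho, pvalue = spearmanr(predictions, references)
print(round(rho, 2))     # -0.7
print(round(pvalue, 2))  # 0.19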
315
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: a = None a = logging.get_logger(__name__) a = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } a = { '''google/rembert''': 256, } a = '''▁''' class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[str] = VOCAB_FILES_NAMES UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Optional[int] = RemBertTokenizer def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str=True , _UpperCAmelCase : int=True , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]="[CLS]" , _UpperCAmelCase : List[str]="[SEP]" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : Any="[SEP]" , _UpperCAmelCase : Tuple="<pad>" , _UpperCAmelCase : Any="[CLS]" , _UpperCAmelCase : List[Any]="[MASK]" , **_UpperCAmelCase : List[Any] , ): # Mask token behave like a normal word, i.e. include the space before it _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) _A = do_lower_case _A = remove_space _A = keep_accents _A = vocab_file _A = False if not self.vocab_file else True def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error('Vocabulary path ({}) should be a directory'.format(_UpperCAmelCase ) ) return _A = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
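The special-token layout that build_inputs_with_special_tokens produces above, sketched with placeholder ids; the real cls/sep ids come from the tokenizer's vocabulary.

cls_id, sep_id = 101, 102  # illustrative ids, not RemBERT's actual ones

def build_inputs(token_ids_a: list, token_ids_b: list = None) -> list:
    if token_ids_b is None:
        return [cls_id, *token_ids_a, sep_id]
    return [cls_id, *token_ids_a, sep_id, *token_ids_b, sep_id]

print(build_inputs([7, 8]))       # [101, 7, 8, 102]
print(build_inputs([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]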
315
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
315
1
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int a = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowercase_ ( datasets.BuilderConfig ): '''simple docstring''' UpperCAmelCase : Optional[datasets.Features] = None def _snake_case ( _snake_case : "pyspark.sql.DataFrame" , _snake_case : List[int] , ) -> Dict: '''simple docstring''' import pyspark def generate_fn(): _A = df.select('*' , pyspark.sql.functions.spark_partition_id().alias('part_id' ) ) for partition_id in partition_order: _A = df_with_partition_id.select('*' ).where(F'''part_id = {partition_id}''' ).drop('part_id' ) _A = partition_df.collect() _A = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowercase_ ( _BaseExamplesIterable ): '''simple docstring''' def __init__( self : str , _UpperCAmelCase : "pyspark.sql.DataFrame" , _UpperCAmelCase : Tuple=None , ): _A = df _A = partition_order or range(self.df.rdd.getNumPartitions() ) _A = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[Any] ): yield from self.generate_examples_fn() def lowerCAmelCase_ ( self : int , _UpperCAmelCase : np.random.Generator ): _A = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(_UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int ): _A = self.split_shard_indices_by_worker(_UpperCAmelCase , _UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : int ): return len(self.partition_order ) class lowercase_ ( datasets.DatasetBuilder ): '''simple docstring''' UpperCAmelCase : Optional[int] = SparkConfig def __init__( self : int , _UpperCAmelCase : "pyspark.sql.DataFrame" , _UpperCAmelCase : str = None , _UpperCAmelCase : str = None , **_UpperCAmelCase : Optional[Any] , ): import pyspark _A = pyspark.sql.SparkSession.builder.getOrCreate() _A = df _A = working_dir super().__init__( cache_dir=_UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **_UpperCAmelCase , ) def lowerCAmelCase_ ( self : List[Any] ): # Returns the path of the created file. def create_cache_and_write_probe(_UpperCAmelCase : Union[str, Any] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=_UpperCAmelCase ) _A = os.path.join(self._cache_dir , 'fs_test' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(_UpperCAmelCase , 'a' ) return [probe_file] if self._spark.conf.get('spark.master' , '' ).startswith('local' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: _A = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_UpperCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( 'When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir' ) def lowerCAmelCase_ ( self : List[str] ): return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : datasets.download.download_manager.DownloadManager ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Any ): import pyspark def get_arrow_batch_size(_UpperCAmelCase : Optional[Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'batch_bytes': [batch.nbytes]} ) _A = self.df.count() _A = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _A = ( self.df.limit(_UpperCAmelCase ) .repartition(1 ) .mapInArrow(_UpperCAmelCase , 'batch_bytes: long' ) .agg(pyspark.sql.functions.sum('batch_bytes' ).alias('sample_bytes' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _A = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _A = min(_UpperCAmelCase , int(approx_total_size / max_shard_size ) ) _A = self.df.repartition(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int , ): import pyspark _A = ParquetWriter if file_format == 'parquet' else ArrowWriter _A = os.path.join(self._working_dir , os.path.basename(_UpperCAmelCase ) ) if self._working_dir else fpath _A = file_format == 'parquet' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _A = self.config.features _A = self._writer_batch_size _A = self._fs.storage_options def write_arrow(_UpperCAmelCase : List[str] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _A = pyspark.TaskContext().taskAttemptId() _A = next(_UpperCAmelCase , _UpperCAmelCase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['task_id', 'num_examples', 'num_bytes'] , ) _A = 0 _A = writer_class( features=_UpperCAmelCase , path=working_fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , writer_batch_size=_UpperCAmelCase , storage_options=_UpperCAmelCase , embed_local_files=_UpperCAmelCase , ) _A = pa.Table.from_batches([first_batch] ) writer.write_table(_UpperCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _A , _A = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) shard_id += 1 _A = writer_class( features=writer._features , path=working_fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , writer_batch_size=_UpperCAmelCase , storage_options=_UpperCAmelCase , embed_local_files=_UpperCAmelCase , ) _A = pa.Table.from_batches([batch] ) writer.write_table(_UpperCAmelCase ) if writer._num_bytes > 0: _A , _A = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['task_id', 'num_examples', 'num_bytes'] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(_UpperCAmelCase ) ): _A = os.path.join(os.path.dirname(_UpperCAmelCase ) , os.path.basename(_UpperCAmelCase ) ) shutil.move(_UpperCAmelCase , _UpperCAmelCase ) _A = ( self.df.mapInArrow(_UpperCAmelCase , 'task_id: long, num_examples: long, num_bytes: long' ) .groupBy('task_id' ) .agg( pyspark.sql.functions.sum('num_examples' ).alias('total_num_examples' ) , pyspark.sql.functions.sum('num_bytes' ).alias('total_num_bytes' ) , pyspark.sql.functions.count('num_bytes' ).alias('num_shards' ) , pyspark.sql.functions.collect_list('num_examples' ).alias('shard_lengths' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : "datasets.SplitGenerator" , _UpperCAmelCase : str = "arrow" , _UpperCAmelCase : Optional[Union[str, int]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Dict , ): self._validate_cache_dir() _A = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(_UpperCAmelCase ) _A = not is_remote_filesystem(self._fs ) _A = os.path.join if is_local else posixpath.join _A = '-TTTTT-SSSSS-of-NNNNN' _A = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _A = path_join(self._output_dir , _UpperCAmelCase ) _A = 0 _A = 0 _A = 0 _A = [] _A = [] for task_id, content in self._prepare_split_single(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): ( ( _A ) , ( _A ) , ( _A ) , ( _A ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(_UpperCAmelCase ) _A = total_num_examples _A = total_num_bytes # should rename everything at the end logger.debug(F'''Renaming {total_shards} shards.''' ) if total_shards > 1: _A = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
_A = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int , ): rename( _UpperCAmelCase , fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , fpath.replace('TTTTT-SSSSS' , F'''{global_shard_id:05d}''' ).replace('NNNNN' , F'''{total_shards:05d}''' ) , ) _A = [] _A = 0 for i in range(len(_UpperCAmelCase ) ): _A , _A = task_id_and_num_shards[i] for shard_id in range(_UpperCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(_UpperCAmelCase , len(_UpperCAmelCase ) ).map(lambda _UpperCAmelCase : _rename_shard(*_UpperCAmelCase ) ).collect() else: # don't use any pattern _A = 0 _A = task_id_and_num_shards[0][0] self._rename( fpath.replace('SSSSS' , F'''{shard_id:05d}''' ).replace('TTTTT' , F'''{task_id:05d}''' ) , fpath.replace(_UpperCAmelCase , '' ) , ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : "datasets.SplitGenerator" , ): return SparkExamplesIterable(self.df )
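The builder above is what backs `Dataset.from_spark`. A minimal local-mode usage sketch, assuming `pyspark` is installed and a recent `datasets` release that exposes `Dataset.from_spark`; the column names and values are illustrative:

from pyspark.sql import SparkSession
from datasets import Dataset

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label long")

# Internally the builder repartitions df if needed and writes one Arrow shard set
# per Spark task, then renames shards to the -SSSSS-of-NNNNN pattern shown above.
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello', 'label': 0}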
315
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
1
"""simple docstring""" import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=100 , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : int=30 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : Any=5 , _UpperCAmelCase : int=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Dict=10 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : List[Any]=3 , ): _A = parent _A = vocab_size _A = batch_size _A = image_size _A = patch_size _A = num_channels _A = is_training _A = use_labels _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = type_sequence_label_size _A = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _A = (image_size // patch_size) ** 2 _A = num_patches + 1 def lowerCAmelCase_ ( self : int ): _A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): _A = FlaxBeitModel(config=_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ): _A = FlaxBeitForMaskedImageModeling(config=_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] ): _A = self.type_sequence_label_size _A = FlaxBeitForImageClassification(config=_UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test 
greyscale images _A = 1 _A = FlaxBeitForImageClassification(_UpperCAmelCase ) _A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _A = model(_UpperCAmelCase ) def lowerCAmelCase_ ( self : int ): _A = self.prepare_config_and_inputs() ( ( _A ) , ( _A ) , ( _A ) , ) = config_and_inputs _A = {'pixel_values': pixel_values} return config, inputs_dict @require_flax class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Any = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = FlaxBeitModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 ) def lowerCAmelCase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _A = model_class(_UpperCAmelCase ) _A = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _A = [*signature.parameters.keys()] _A = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A , _A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): _A = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) _A = model_class(_UpperCAmelCase ) @jax.jit def model_jitted(_UpperCAmelCase : Dict , **_UpperCAmelCase : Optional[int] ): return model(pixel_values=_UpperCAmelCase , **_UpperCAmelCase ) with self.subTest('JIT Enabled' ): _A = model_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): _A = model_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Dict ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Tuple ): for model_class_name in self.all_model_classes: _A = model_class_name.from_pretrained('microsoft/beit-base-patch16-224' ) _A = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(_UpperCAmelCase ) def _snake_case ( ) -> Tuple: '''simple docstring''' _A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_vision @require_flax class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224' ) if is_vision_available() else None @slow def lowerCAmelCase_ ( self : str ): _A = FlaxBeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k' ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=_UpperCAmelCase , return_tensors='np' ).pixel_values # prepare 
bool_masked_pos _A = np.ones((1, 196) , dtype=_UpperCAmelCase ) # forward pass _A = model(pixel_values=_UpperCAmelCase , bool_masked_pos=_UpperCAmelCase ) _A = outputs.logits # verify the logits _A = (1, 196, 8_192) self.assertEqual(logits.shape , _UpperCAmelCase ) _A = np.array( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , _UpperCAmelCase , atol=1E-2 ) ) @slow def lowerCAmelCase_ ( self : Tuple ): _A = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224' ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=_UpperCAmelCase , return_tensors='np' ) # forward pass _A = model(**_UpperCAmelCase ) _A = outputs.logits # verify the logits _A = (1, 1_000) self.assertEqual(logits.shape , _UpperCAmelCase ) _A = np.array([-1.2385, -1.0987, -1.0108] ) self.assertTrue(np.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) _A = 281 self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Dict ): _A = FlaxBeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k' ) _A = self.default_image_processor _A = prepare_img() _A = image_processor(images=_UpperCAmelCase , return_tensors='np' ) # forward pass _A = model(**_UpperCAmelCase ) _A = outputs.logits # verify the logits _A = (1, 21_841) self.assertEqual(logits.shape , _UpperCAmelCase ) _A = np.array([1.6881, -0.2787, 0.5901] ) self.assertTrue(np.allclose(logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) ) _A = 2_396 self.assertEqual(logits.argmax(-1 ).item() , _UpperCAmelCase )
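A standalone inference sketch mirroring the image-classification integration test above (assumes `flax`, `transformers`, and network access to the `microsoft/beit-base-patch16-224` checkpoint):

from PIL import Image
from transformers import BeitImageProcessor, FlaxBeitForImageClassification

processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
model = FlaxBeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="np")
logits = model(**inputs).logits   # shape (1, 1000), as the test above verifies
print(int(logits.argmax(-1)[0]))  # the test expects class index 281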
315
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
315
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: a = None a = logging.get_logger(__name__) a = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } a = { '''moussaKam/mbarthez''': 1_024, '''moussaKam/barthez''': 1_024, '''moussaKam/barthez-orangesum-title''': 1_024, } a = '''▁''' class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : int = ['''input_ids''', '''attention_mask'''] UpperCAmelCase : Tuple = BarthezTokenizer def __init__( self : Optional[Any] , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : int=None , _UpperCAmelCase : str="<s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : str="</s>" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Union[str, Any]="<unk>" , _UpperCAmelCase : List[Any]="<pad>" , _UpperCAmelCase : List[str]="<mask>" , **_UpperCAmelCase : Optional[int] , ): # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) _A = vocab_file _A = False if not self.vocab_file else True def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
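A quick round-trip sketch for the fast tokenizer above (assumes network access to the `moussaKam/barthez` checkpoint). Per `build_inputs_with_special_tokens`, a single sequence is wrapped as `<s> ... </s>` and a pair as `<s> A </s></s> B </s>`:

from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
enc = tok("Bonjour le monde")
print(tok.convert_ids_to_tokens(enc.input_ids))  # first token '<s>', last token '</s>'
print(tok.decode(enc.input_ids, skip_special_tokens=True))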
315
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = '''▁''' a = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', } a = { '''vocab_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json''' ), }, '''spm_file''': { '''facebook/s2t-small-librispeech-asr''': ( '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model''' ) }, } a = { '''facebook/s2t-small-librispeech-asr''': 1_024, } a = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de'''] a = {'''mustc''': MUSTC_LANGS} class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = VOCAB_FILES_NAMES UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Optional[int] = MAX_MODEL_INPUT_SIZES UpperCAmelCase : Optional[int] = ['''input_ids''', '''attention_mask'''] UpperCAmelCase : List[int] = [] def __init__( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : Optional[Any]="</s>" , _UpperCAmelCase : List[str]="<pad>" , _UpperCAmelCase : List[str]="<unk>" , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=False , _UpperCAmelCase : str=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Optional[Dict[str, Any]] = None , **_UpperCAmelCase : Any , ): _A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , ) _A = do_upper_case _A = do_lower_case _A = load_json(_UpperCAmelCase ) _A = {v: k for k, v in self.encoder.items()} _A = spm_file _A = load_spm(_UpperCAmelCase , self.sp_model_kwargs ) if lang_codes is not None: _A = lang_codes _A = LANGUAGES[lang_codes] _A = [F'''<lang:{lang}>''' for lang in self.langs] _A = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''' ) for lang in self.langs} _A = self.lang_tokens _A = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: _A = {} @property def lowerCAmelCase_ ( self : Any ): return len(self.encoder ) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return self._tgt_lang @tgt_lang.setter def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[str] ): _A = new_tgt_lang self.set_tgt_lang_special_tokens(_UpperCAmelCase ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str ): _A = self.lang_code_to_id[tgt_lang] _A = [lang_code_id] def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : str ): return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : int ): return self.decoder.get(_UpperCAmelCase , self.unk_token ) def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[str] ): _A = [] _A = '' for token in tokens: # make sure that special tokens are not decoded using 
sentencepiece model if token in self.all_special_tokens: _A = self.sp_model.decode(_UpperCAmelCase ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " _A = [] else: current_sub_tokens.append(_UpperCAmelCase ) _A = self.sp_model.decode(_UpperCAmelCase ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str]=None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) _A = [1] * len(self.prefix_tokens ) _A = [1] if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def lowerCAmelCase_ ( self : Dict ): _A = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _A = self.__dict__.copy() _A = None return state def __setstate__( self : Dict , _UpperCAmelCase : Dict ): _A = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): _A = {} _A = load_spm(self.spm_file , self.sp_model_kwargs ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): _A = Path(_UpperCAmelCase ) assert save_dir.is_dir(), F'''{save_directory} should be a directory''' _A = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) _A = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , _UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , _UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(_UpperCAmelCase , 'wb' ) as fi: _A = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (str(_UpperCAmelCase ), str(_UpperCAmelCase )) def _snake_case ( _snake_case : str , _snake_case : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: '''simple docstring''' _A = sentencepiece.SentencePieceProcessor(**_snake_case ) spm.Load(str(_snake_case ) ) return spm def _snake_case ( _snake_case : str ) -> Union[Dict, List]: '''simple docstring''' with open(_snake_case , 'r' ) as f: return json.load(_snake_case ) def _snake_case ( _snake_case : Optional[Any] , _snake_case : str ) -> None: '''simple docstring''' with open(_snake_case , 'w' ) as f: json.dump(_snake_case , _snake_case , indent=2 )
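A round-trip sketch for the tokenizer above (assumes `sentencepiece` and network access to the `facebook/s2t-small-librispeech-asr` checkpoint):

from transformers import Speech2TextTokenizer

tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tok("hello world").input_ids  # prefix language tokens (if configured) + pieces + </s>
print(ids)
print(tok.decode(ids, skip_special_tokens=True))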
315
"""simple docstring""" import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ): super().__init__(*_UpperCAmelCase , **_UpperCAmelCase ) _A = eval_examples _A = post_process_function def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ): _A = self.eval_dataset if eval_dataset is None else eval_dataset _A = self.get_eval_dataloader(_UpperCAmelCase ) _A = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. _A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) else: _A = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) _A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase ) return metrics def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ): _A = self.get_test_dataloader(_UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
_A = self.compute_metrics _A = None _A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop _A = time.time() try: _A = eval_loop( _UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , ) finally: _A = compute_metrics _A = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output _A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' ) _A = self.compute_metrics(_UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): _A = metrics.pop(_UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
315
1
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging a = logging.get_logger(__name__) a = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Dict = '''perceiver''' def __init__( self : Tuple , _UpperCAmelCase : Optional[int]=256 , _UpperCAmelCase : List[Any]=1_280 , _UpperCAmelCase : int=768 , _UpperCAmelCase : Tuple=1 , _UpperCAmelCase : List[Any]=26 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Union[str, Any]=8 , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Union[str, Any]=None , _UpperCAmelCase : List[Any]="kv" , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Any=1 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : List[str]=0.1 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : List[str]=1E-1_2 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=262 , _UpperCAmelCase : Tuple=2_048 , _UpperCAmelCase : int=56 , _UpperCAmelCase : Tuple=[368, 496] , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Dict=1_920 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Any=[1, 16, 224, 224] , **_UpperCAmelCase : Dict , ): super().__init__(**_UpperCAmelCase ) _A = num_latents _A = d_latents _A = d_model _A = num_blocks _A = num_self_attends_per_block _A = num_self_attention_heads _A = num_cross_attention_heads _A = qk_channels _A = v_channels _A = cross_attention_shape_for_attention _A = self_attention_widening_factor _A = cross_attention_widening_factor _A = hidden_act _A = attention_probs_dropout_prob _A = initializer_range _A = layer_norm_eps _A = use_query_residual # masked language modeling attributes _A = vocab_size _A = max_position_embeddings # image classification attributes _A = image_size # flow attributes _A = train_size # multimodal autoencoding attributes _A = num_frames _A = audio_samples_per_frame _A = samples_per_patch _A = output_shape class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self : Any ): if self.task == "multiple-choice": _A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('inputs', dynamic_axis), ('attention_mask', dynamic_axis), ] ) @property def lowerCAmelCase_ ( self : List[str] ): return 1E-4 def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[TensorType] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(_UpperCAmelCase , _UpperCAmelCase ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic 
axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _A = preprocessor.num_special_tokens_to_add(_UpperCAmelCase ) _A = compute_effective_axis_dimension( _UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence _A = [' '.join(['a'] ) * seq_length] * batch_size _A = dict(preprocessor(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) ) _A = inputs.pop('input_ids' ) return inputs elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _A = compute_effective_axis_dimension(_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch ) _A = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _A = dict(preprocessor(images=_UpperCAmelCase , return_tensors=_UpperCAmelCase ) ) _A = inputs.pop('pixel_values' ) return inputs else: raise ValueError( 'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
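A minimal sketch instantiating the config above with its defaults:

from transformers import PerceiverConfig

config = PerceiverConfig()
print(config.model_type)                  # 'perceiver'
print(config.num_latents)                 # 256, per the __init__ defaults above
print(config.d_latents)                   # 1280
print(config.num_self_attends_per_block)  # 26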
315
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ) -> bool: '''simple docstring''' return numa ^ numa < 0 if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" import math def _snake_case ( _snake_case : int ) -> bool: '''simple docstring''' assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or not number % 2: # Negatives, 0, 1 and all even numbers are not primes return False _A = range(3 , int(math.sqrt(_snake_case ) + 1 ) , 2 ) return not any(not number % i for i in odd_numbers ) def _snake_case ( _snake_case : str , _snake_case : str=1 , **_snake_case : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' _A = factor * value _A = value while not is_prime(_snake_case ): value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1 if value == first_value_val: return next_prime(value + 1 , **_snake_case ) return value
315
"""simple docstring""" import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _A = Vector() def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(_UpperCAmelCase ) , '(0,0,0,0,0,1)' ) def lowerCAmelCase_ ( self : Optional[int] ): _A = Vector([1, 2, 3, 4] ) self.assertEqual(len(_UpperCAmelCase ) , 4 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2] ) _A = Vector([1, 2, 3, 4, 5] ) _A = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _A = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def lowerCAmelCase_ ( self : str ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def lowerCAmelCase_ ( self : int ): _A = Vector([1, 2, 3] ) _A = Vector([2, -1, 4] ) # for test of dot product _A = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '(3.0,6.0,9.0)' ) self.assertEqual((a * b) , 0 ) def lowerCAmelCase_ ( self : Dict ): self.assertEqual(str(zero_vector(10 ) ).count('0' ) , 10 ) def lowerCAmelCase_ ( self : Tuple ): self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 2, 3] ) _A = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase ) ) , '(3,4,7)' ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = Vector([1, 0, 0, 0, 0, 0] ) _A = x.copy() self.assertEqual(str(_UpperCAmelCase ) , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(_UpperCAmelCase ) , '(0,1,0)' ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('|1,2,3|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase ) ) def lowerCAmelCase_ ( self : str ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _A = Vector([1, 2, 3] ) self.assertEqual('(14,32,50)' , str(a * x ) ) self.assertEqual('|2,4,6|\n|8,10,12|\n|14,16,18|\n' , str(a * 2 ) ) def lowerCAmelCase_ ( self : Any ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) 
a.change_component(0 , 2 , 5 ) self.assertEqual('|1,2,5|\n|2,4,5|\n|6,7,8|\n' , str(_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def lowerCAmelCase_ ( self : Tuple ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|2,4,10|\n|4,8,10|\n|12,14,18|\n' , str(a + b ) ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _A = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('|0,0,-4|\n|0,0,0|\n|0,0,-2|\n' , str(a - b ) ) def lowerCAmelCase_ ( self : int ): self.assertEqual( '|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
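The library under test (imported from `.lib` above) is exercised like this; a sketch assuming the same module layout:

from lib import Matrix, Vector, axpy  # layout implied by the relative import above

a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
x = Vector([1, 2, 3])
print(a.determinant())                # -5, matching the determinant test above
print(a * x)                          # matrix-vector product, a Vector
print(axpy(2, x, Vector([1, 0, 1])))  # (3,4,7), matching the axpy test above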
315
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a = logging.get_logger(__name__) a = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} a = { '''tokenizer_file''': { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''', }, } a = { '''gpt-neox-20b''': 2_048, } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : str = VOCAB_FILES_NAMES UpperCAmelCase : Dict = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : List[str] = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Any=None , _UpperCAmelCase : Tuple="<|endoftext|>" , _UpperCAmelCase : Optional[int]="<|endoftext|>" , _UpperCAmelCase : Tuple="<|endoftext|>" , _UpperCAmelCase : Dict=False , **_UpperCAmelCase : int , ): super().__init__( _UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) _A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _UpperCAmelCase ) != add_prefix_space: _A = getattr(_UpperCAmelCase , pre_tok_state.pop('type' ) ) _A = add_prefix_space _A = pre_tok_class(**_UpperCAmelCase ) _A = add_prefix_space def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): _A = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : "Conversation" ): _A = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [self.eos_token_id] ) if len(_UpperCAmelCase ) > self.model_max_length: _A = input_ids[-self.model_max_length :] return input_ids
315
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) a = { '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''', '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''', } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = '''xlnet''' UpperCAmelCase : List[Any] = ['''mems'''] UpperCAmelCase : Any = { '''n_token''': '''vocab_size''', # Backward compatibility '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict=32_000 , _UpperCAmelCase : List[str]=1_024 , _UpperCAmelCase : Any=24 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Union[str, Any]=4_096 , _UpperCAmelCase : Tuple="gelu" , _UpperCAmelCase : Any=True , _UpperCAmelCase : str="bi" , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-1_2 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Dict=None , _UpperCAmelCase : int=True , _UpperCAmelCase : int=False , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : int=-1 , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : Union[str, Any]="last" , _UpperCAmelCase : int=True , _UpperCAmelCase : str="tanh" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=5 , _UpperCAmelCase : Optional[Any]=5 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : List[str]=1 , _UpperCAmelCase : Dict=2 , **_UpperCAmelCase : int , ): _A = vocab_size _A = d_model _A = n_layer _A = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _A = d_model // n_head _A = ff_activation _A = d_inner _A = untie_r _A = attn_type _A = initializer_range _A = layer_norm_eps _A = dropout _A = mem_len _A = reuse_len _A = bi_data _A = clamp_len _A = same_length _A = summary_type _A = summary_use_proj _A = summary_activation _A = summary_last_dropout _A = start_n_top _A = end_n_top _A = bos_token_id _A = pad_token_id _A = eos_token_id if "use_cache" in kwargs: warnings.warn( 'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`' ' instead.' , _UpperCAmelCase , ) _A = kwargs['use_cache'] _A = use_mems_eval _A = use_mems_train super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] ): # Message copied from Transformer-XL documentation raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
315
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a = logging.get_logger(__name__) a = {'''vocab_file''': '''sentencepiece.model'''} a = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } a = { '''google/rembert''': 256, } class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Optional[Any] = VOCAB_FILES_NAMES UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : Tuple=False , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]="[CLS]" , _UpperCAmelCase : Tuple="[SEP]" , _UpperCAmelCase : Optional[int]="[UNK]" , _UpperCAmelCase : Optional[Any]="[SEP]" , _UpperCAmelCase : List[str]="[PAD]" , _UpperCAmelCase : Dict="[CLS]" , _UpperCAmelCase : Optional[int]="[MASK]" , **_UpperCAmelCase : Any , ): super().__init__( do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , ) _A = do_lower_case _A = remove_space _A = keep_accents _A = vocab_file _A = spm.SentencePieceProcessor() self.sp_model.Load(_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Union[str, Any] ): return len(self.sp_model ) def lowerCAmelCase_ ( self : Tuple ): _A = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ): _A = self.__dict__.copy() _A = None return state def __setstate__( self : Any , _UpperCAmelCase : Any ): _A = d _A = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=False ): _A = self.sp_model.EncodeAsPieces(_UpperCAmelCase ) return pieces def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] ): return self.sp_model.PieceToId(_UpperCAmelCase ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Optional[int] ): return self.sp_model.IdToPiece(_UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Dict ): _A = self.sp_model.decode_pieces(_UpperCAmelCase ) return out_string def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1] def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ): if not os.path.isdir(_UpperCAmelCase ): logger.error('Vocabulary path ({}) should be a directory'.format(_UpperCAmelCase ) ) return _A = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) return (out_vocab_file,)
315
"""simple docstring""" import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a = { '''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), '''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), '''bert''': (BertConfig, BertForMaskedLM, BertTokenizer), '''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def _snake_case ( _snake_case : Tuple ) -> Dict: '''simple docstring''' assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts ) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config ) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights ) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def _snake_case ( _snake_case : str , _snake_case : List[Any] ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False elif args.student_type == "gpt2": _A = False def _snake_case ( _snake_case : str , _snake_case : int ) -> Tuple: '''simple docstring''' if args.student_type == "roberta": _A = False def _snake_case ( ) -> Tuple: '''simple docstring''' _A = argparse.ArgumentParser(description='Training' ) parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.' ) parser.add_argument( '--dump_path' , type=_snake_case , required=_snake_case , help='The output directory (log, checkpoints, parameters, etc.)' ) parser.add_argument( '--data_file' , type=_snake_case , required=_snake_case , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , ) parser.add_argument( '--student_type' , type=_snake_case , choices=['distilbert', 'roberta', 'gpt2'] , required=_snake_case , help='The student type (DistilBERT, RoBERTa).' , ) parser.add_argument('--student_config' , type=_snake_case , required=_snake_case , help='Path to the student configuration.' ) parser.add_argument( '--student_pretrained_weights' , default=_snake_case , type=_snake_case , help='Load student initialization checkpoint.' ) parser.add_argument( '--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=_snake_case , help='Teacher type (BERT, RoBERTa).' ) parser.add_argument('--teacher_name' , type=_snake_case , required=_snake_case , help='The teacher model.' ) parser.add_argument('--temperature' , default=2.0 , type=_snake_case , help='Temperature for the softmax temperature.' 
) parser.add_argument( '--alpha_ce' , default=0.5 , type=_snake_case , help='Linear weight for the distillation loss. Must be >=0.' ) parser.add_argument( '--alpha_mlm' , default=0.0 , type=_snake_case , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , ) parser.add_argument('--alpha_clm' , default=0.5 , type=_snake_case , help='Linear weight for the CLM loss. Must be >=0.' ) parser.add_argument('--alpha_mse' , default=0.0 , type=_snake_case , help='Linear weight of the MSE loss. Must be >=0.' ) parser.add_argument( '--alpha_cos' , default=0.0 , type=_snake_case , help='Linear weight of the cosine embedding loss. Must be >=0.' ) parser.add_argument( '--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.' ) parser.add_argument( '--mlm_mask_prop' , default=0.15 , type=_snake_case , help='Proportion of tokens for which we need to make a prediction.' , ) parser.add_argument('--word_mask' , default=0.8 , type=_snake_case , help='Proportion of tokens to mask out.' ) parser.add_argument('--word_keep' , default=0.1 , type=_snake_case , help='Proportion of tokens to keep.' ) parser.add_argument('--word_rand' , default=0.1 , type=_snake_case , help='Proportion of tokens to randomly replace.' ) parser.add_argument( '--mlm_smoothing' , default=0.7 , type=_snake_case , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , ) parser.add_argument('--token_counts' , type=_snake_case , help='The token counts in the data_file for MLM.' ) parser.add_argument( '--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , ) parser.add_argument( '--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , ) parser.add_argument( '--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , ) parser.add_argument('--n_epoch' , type=_snake_case , default=3 , help='Number of pass on the whole dataset.' ) parser.add_argument('--batch_size' , type=_snake_case , default=5 , help='Batch size (for each process).' ) parser.add_argument( '--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , ) parser.add_argument( '--gradient_accumulation_steps' , type=_snake_case , default=50 , help='Gradient accumulation for larger training batches.' , ) parser.add_argument('--warmup_prop' , default=0.05 , type=_snake_case , help='Linear warmup proportion.' ) parser.add_argument('--weight_decay' , default=0.0 , type=_snake_case , help='Weight decay if we apply some.' ) parser.add_argument('--learning_rate' , default=5E-4 , type=_snake_case , help='The initial learning rate for Adam.' ) parser.add_argument('--adam_epsilon' , default=1E-6 , type=_snake_case , help='Epsilon for Adam optimizer.' ) parser.add_argument('--max_grad_norm' , default=5.0 , type=_snake_case , help='Max gradient norm.' ) parser.add_argument('--initializer_range' , default=0.02 , type=_snake_case , help='Random initialization range.' 
) parser.add_argument( '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , ) parser.add_argument( '--fp16_opt_level' , type=_snake_case , default='O1' , help=( 'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].' ' See details at https://nvidia.github.io/apex/amp.html' ) , ) parser.add_argument('--n_gpu' , type=_snake_case , default=1 , help='Number of GPUs in the node.' ) parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='Distributed training - Local rank' ) parser.add_argument('--seed' , type=_snake_case , default=56 , help='Random seed' ) parser.add_argument('--log_interval' , type=_snake_case , default=5_00 , help='Tensorboard logging interval.' ) parser.add_argument('--checkpoint_interval' , type=_snake_case , default=40_00 , help='Checkpoint interval.' ) _A = parser.parse_args() sanity_checks(_snake_case ) # ARGS # init_gpu_params(_snake_case ) set_seed(_snake_case ) if args.is_master: if os.path.exists(args.dump_path ): if not args.force: raise ValueError( F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite''' ' it. Use `--force` if you want to overwrite it.' ) else: shutil.rmtree(args.dump_path ) if not os.path.exists(args.dump_path ): os.makedirs(args.dump_path ) logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' ) # SAVE PARAMS # logger.info(F'''Param: {args}''' ) with open(os.path.join(args.dump_path , 'parameters.json' ) , 'w' ) as f: json.dump(vars(_snake_case ) , _snake_case , indent=4 ) git_log(args.dump_path ) _A , _A , _A = MODEL_CLASSES[args.student_type] _A , _A , _A = MODEL_CLASSES[args.teacher_type] # TOKENIZER # _A = teacher_tokenizer_class.from_pretrained(args.teacher_name ) _A = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): _A = tokenizer.all_special_tokens.index(_snake_case ) _A = tokenizer.all_special_ids[idx] logger.info(F'''Special tokens {special_tok_ids}''' ) _A = special_tok_ids _A = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(F'''Loading data from {args.data_file}''' ) with open(args.data_file , 'rb' ) as fp: _A = pickle.load(_snake_case ) if args.mlm: logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''' ) with open(args.token_counts , 'rb' ) as fp: _A = pickle.load(_snake_case ) _A = np.maximum(_snake_case , 1 ) ** -args.mlm_smoothing for idx in special_tok_ids.values(): _A = 0.0 # do not predict special tokens _A = torch.from_numpy(_snake_case ) else: _A = None _A = LmSeqsDataset(params=_snake_case , data=_snake_case ) logger.info('Data loader created.' ) # STUDENT # logger.info(F'''Loading student config from {args.student_config}''' ) _A = student_config_class.from_pretrained(args.student_config ) _A = True if args.student_pretrained_weights is not None: logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''' ) _A = student_model_class.from_pretrained(args.student_pretrained_weights , config=_snake_case ) else: _A = student_model_class(_snake_case ) if args.n_gpu > 0: student.to(F'''cuda:{args.local_rank}''' ) logger.info('Student loaded.'
) # TEACHER # _A = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_snake_case ) if args.n_gpu > 0: teacher.to(F'''cuda:{args.local_rank}''' ) logger.info(F'''Teacher loaded from {args.teacher_name}.''' ) # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_snake_case , _snake_case ) if args.freeze_token_type_embds: freeze_token_type_embeddings(_snake_case , _snake_case ) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0 ) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() _A = Distiller( params=_snake_case , dataset=_snake_case , token_probs=_snake_case , student=_snake_case , teacher=_snake_case ) distiller.train() logger.info('Let\'s go get some drinks.' ) if __name__ == "__main__": main()
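# A hypothetical command-line sketch for the training script above; the flag
# names come from the argparse definitions, while the file name (train.py) and
# all paths are placeholder assumptions. Note that the sanity checks require
# `--alpha_clm 0.0` whenever `--mlm` is set, since `--alpha_clm` defaults to 0.5.
#
#   python train.py \
#       --student_type distilbert \
#       --student_config training_configs/distilbert-base-uncased.json \
#       --teacher_type bert \
#       --teacher_name bert-base-uncased \
#       --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#       --dump_path serialization_dir/my_first_distillation \
#       --data_file data/binarized_text.pickle \
#       --token_counts data/token_counts.pickle \
#       --force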
315
1
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ) -> List[Any]: '''simple docstring''' _A = HfArgumentParser(_snake_case ) _A = parser.parse_args_into_dataclasses()[0] _A = TensorFlowBenchmark(args=_snake_case ) try: _A = parser.parse_args_into_dataclasses()[0] except ValueError as e: _A = 'Arg --no_{0} is no longer used, please use --no-{0} instead.' _A = ' '.join(str(_snake_case ).split(' ' )[:-1] ) _A = '' _A = eval(str(_snake_case ).split(' ' )[-1] ) _A = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_snake_case ) if len(_snake_case ) > 0: _A = full_error_msg + begin_error_msg + str(_snake_case ) raise ValueError(_snake_case ) benchmark.run() if __name__ == "__main__": main()
315
"""simple docstring""" from manim import * class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Dict ): _A = Rectangle(height=0.5 , width=0.5 ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _A = Rectangle(height=0.25 , width=0.25 ) _A = [mem.copy() for i in range(6 )] _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('CPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(4 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('GPU' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(_UpperCAmelCase ) _A = [mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Model' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(_UpperCAmelCase ) _A = [] _A = [] for i, rect in enumerate(_UpperCAmelCase ): _A = fill.copy().set_fill(_UpperCAmelCase , opacity=0.8 ) target.move_to(_UpperCAmelCase ) model_arr.append(_UpperCAmelCase ) _A = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(_UpperCAmelCase ) self.add(*_UpperCAmelCase , *_UpperCAmelCase ) _A = [meta_mem.copy() for i in range(6 )] _A = [meta_mem.copy() for i in range(6 )] _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 ) _A = Text('Disk' , font_size=24 ) _A = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase ) disk.move_to([-4, -1.25, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _A = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_UpperCAmelCase , _UpperCAmelCase ) _A = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_UpperCAmelCase ) _A = MarkupText( F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase ) ) _A = Square(0.3 ) input.set_fill(_UpperCAmelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , _UpperCAmelCase , buff=0.5 ) self.play(Write(_UpperCAmelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=_UpperCAmelCase , buff=0.02 ) self.play(MoveToTarget(_UpperCAmelCase ) ) self.play(FadeOut(_UpperCAmelCase ) ) _A = Arrow(start=_UpperCAmelCase , end=_UpperCAmelCase , color=_UpperCAmelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , _UpperCAmelCase , buff=0.2 ) model_cpu_arr[0].generate_target() 
model_cpu_arr[0].target.move_to(gpu_rect[0] ) _A = MarkupText( F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) ) _A = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(_UpperCAmelCase ) , Circumscribe(model_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _A = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , _UpperCAmelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) _A = AnimationGroup( FadeOut(_UpperCAmelCase , run_time=0.5 ) , MoveToTarget(_UpperCAmelCase , run_time=0.5 ) , FadeIn(_UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(_UpperCAmelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _A = 0.7 self.play( Circumscribe(model_arr[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=_UpperCAmelCase , **_UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=_UpperCAmelCase , **_UpperCAmelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _A = a_c _A = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(_UpperCAmelCase ) , FadeOut(_UpperCAmelCase , run_time=0.5 ) , ) _A = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_UpperCAmelCase , run_time=3 ) , MoveToTarget(_UpperCAmelCase ) ) self.wait()
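# Rendering note (an assumption, not part of the snippet): a manim scene like
# the one above is typically rendered from the command line, e.g.
#
#   manim -pql big_model_inference.py <SceneClassName>
#
# where -p previews the output, -ql renders at low quality, the file name is a
# placeholder, and <SceneClassName> is the scene class defined above.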
315
1
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class lowercase_ : '''simple docstring''' def __init__( self : int , _UpperCAmelCase : Union[str, Any] ): _A = str(id_ ) _A = None _A = None _A = [] _A = {} # {vertex:distance} def __lt__( self : Optional[Any] , _UpperCAmelCase : List[Any] ): return self.key < other.key def __repr__( self : List[Any] ): return self.id def lowerCAmelCase_ ( self : str , _UpperCAmelCase : int ): self.neighbors.append(_UpperCAmelCase ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] ): _A = weight def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : int , _snake_case : Union[str, Any] ) -> int: '''simple docstring''' graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , _snake_case ) graph[b - 1].add_edge(graph[a - 1] , _snake_case ) def _snake_case ( _snake_case : list , _snake_case : Vertex ) -> list: '''simple docstring''' _A = [] for u in graph: _A = math.inf _A = None _A = 0 _A = graph[:] while q: _A = min(_snake_case ) q.remove(_snake_case ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): _A = u _A = u.edges[v.id] for i in range(1 , len(_snake_case ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def _snake_case ( _snake_case : list , _snake_case : Vertex ) -> Iterator[tuple]: '''simple docstring''' for u in graph: _A = math.inf _A = None _A = 0 _A = list(_snake_case ) hq.heapify(_snake_case ) while h: _A = hq.heappop(_snake_case ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): _A = u _A = u.edges[v.id] hq.heapify(_snake_case ) for i in range(1 , len(_snake_case ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def _snake_case ( ) -> None: '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
315
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
315
1
"""simple docstring""" a = {str(digit): digit**5 for digit in range(10)} def _snake_case ( _snake_case : int ) -> int: '''simple docstring''' return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_snake_case ) ) def _snake_case ( ) -> int: '''simple docstring''' return sum( number for number in range(10_00 , 1_00_00_00 ) if number == digits_fifth_powers_sum(_snake_case ) ) if __name__ == "__main__": print(solution())
315
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Any = ['''input_values''', '''attention_mask'''] def __init__( self : Dict , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 16_000 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 80 , _UpperCAmelCase : int = 16 , _UpperCAmelCase : int = 64 , _UpperCAmelCase : str = "hann_window" , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : float = 80 , _UpperCAmelCase : float = 7_600 , _UpperCAmelCase : float = 1E-1_0 , _UpperCAmelCase : int = 2 , _UpperCAmelCase : bool = True , **_UpperCAmelCase : List[Any] , ): super().__init__(feature_size=_UpperCAmelCase , sampling_rate=_UpperCAmelCase , padding_value=_UpperCAmelCase , **_UpperCAmelCase ) _A = do_normalize _A = return_attention_mask _A = num_mel_bins _A = hop_length _A = win_length _A = win_function _A = frame_signal_scale _A = fmin _A = fmax _A = mel_floor _A = reduction_factor _A = win_length * sampling_rate // 1_000 _A = hop_length * sampling_rate // 1_000 _A = optimal_fft_length(self.sample_size ) _A = (self.n_fft // 2) + 1 _A = window_function(window_length=self.sample_size , name=self.win_function , periodic=_UpperCAmelCase ) _A = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='slaney' , mel_scale='slaney' , ) if frame_signal_scale != 1.0: warnings.warn( 'The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) if reduction_factor != 2.0: warnings.warn( 'The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers' , _UpperCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def lowerCAmelCase_ ( _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : List[np.ndarray] , _UpperCAmelCase : float = 0.0 ): if attention_mask is not None: _A = np.array(_UpperCAmelCase , np.intaa ) _A = [] for vector, length in zip(_UpperCAmelCase , attention_mask.sum(-1 ) ): _A = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _A = padding_value normed_input_values.append(_UpperCAmelCase ) else: _A = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : np.ndarray , ): _A = spectrogram( _UpperCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='log10' , ) return log_mel_spec.T def __call__( self : int , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , 
_UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : Optional[int] , ): if audio is None and audio_target is None: raise ValueError('You must provide either `audio` or `audio_target` values.' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' F''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the ``sampling_rate`` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if audio is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) else: _A = None if audio_target is not None: _A = self._process_audio( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase , ) if inputs is None: return inputs_target else: _A = inputs_target['input_values'] _A = inputs_target.get('attention_mask' ) if decoder_attention_mask is not None: _A = decoder_attention_mask return inputs def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _UpperCAmelCase : bool = False , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = isinstance(_UpperCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) _A = is_batched_numpy or ( isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(_UpperCAmelCase , np.ndarray ): _A = np.asarray(_UpperCAmelCase , dtype=np.floataa ) elif isinstance(_UpperCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _A = speech.astype(np.floataa ) # always return batch if not is_batched: _A = [speech] # needed to make pad() work on spectrogram inputs _A = self.feature_size # convert into correct format for padding if is_target: _A = [self._extract_mel_features(_UpperCAmelCase ) for waveform in speech] _A = BatchFeature({'input_values': features} ) _A = self.num_mel_bins else: _A = BatchFeature({'input_values': speech} ) _A = self.pad( _UpperCAmelCase , padding=_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , **_UpperCAmelCase , ) _A = feature_size_hack # convert input values to correct format _A = padded_inputs['input_values'] if not isinstance(input_values[0] , np.ndarray ): _A = [np.asarray(_UpperCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not 
isinstance(_UpperCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _A = [array.astype(np.floataa ) for array in input_values] elif isinstance(_UpperCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _A = input_values.astype(np.floataa ) # convert attention_mask to correct format _A = padded_inputs.get('attention_mask' ) if attention_mask is not None: _A = [np.asarray(_UpperCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: _A = ( attention_mask if self._get_padding_strategies(_UpperCAmelCase , max_length=_UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) _A = self.zero_mean_unit_var_norm( padded_inputs['input_values'] , attention_mask=_UpperCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: _A = padded_inputs.convert_to_tensors(_UpperCAmelCase ) return padded_inputs def lowerCAmelCase_ ( self : Any ): _A = super().to_dict() # Don't serialize these as they are derived from the other properties. _A = ['window', 'mel_filters', 'sample_size', 'sample_stride', 'n_fft', 'n_freqs'] for name in names: if name in output: del output[name] return output
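# A hypothetical usage sketch for the feature extractor above (its class name
# is obfuscated here, so <FeatureExtractor> is a placeholder); the keyword
# names mirror the constructor assignments and the __call__ signature:
#
#   import numpy as np
#   extractor = <FeatureExtractor>(sampling_rate=16_000, num_mel_bins=80)
#   waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16_000)  # log-mel features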
315
1
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def _snake_case ( _snake_case : int = 1_00_00_00 , _snake_case : int = 10 ) -> int: '''simple docstring''' _A = defaultdict(_snake_case ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: _A = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: _A = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(_snake_case , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(F'''{solution() = }''')
315
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [] create_all_state(1 , _snake_case , _snake_case , [] , _snake_case ) return result def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None: '''simple docstring''' if level == 0: total_list.append(current_list[:] ) return for i in range(_snake_case , total_number - level + 2 ): current_list.append(_snake_case ) create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case ) current_list.pop() def _snake_case ( _snake_case : list[list[int]] ) -> None: '''simple docstring''' for i in total_list: print(*_snake_case ) if __name__ == "__main__": a = 4 a = 2 a = generate_all_combinations(n, k) print_all_state(total_list)
315
1
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 a = sys.version_info >= (3, 10) def _snake_case ( _snake_case : Tuple=None , _snake_case : Dict=None ) -> Any: '''simple docstring''' return field(default_factory=lambda: default , metadata=_snake_case ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float UpperCAmelCase : str UpperCAmelCase : bool @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int = 42 UpperCAmelCase : str = field(default='''toto''' , metadata={'''help''': '''help message'''} ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : bool = False UpperCAmelCase : bool = True UpperCAmelCase : Optional[bool] = None class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = '''titi''' UpperCAmelCase : int = '''toto''' class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Union[str, Any] = '''titi''' UpperCAmelCase : Any = '''toto''' UpperCAmelCase : str = 42 @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : BasicEnum = "toto" def lowerCAmelCase_ ( self : List[Any] ): _A = BasicEnum(self.foo ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : MixedTypeEnum = "toto" def lowerCAmelCase_ ( self : Union[str, Any] ): _A = MixedTypeEnum(self.foo ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[int] = None UpperCAmelCase : Optional[float] = field(default=__lowerCAmelCase , metadata={'''help''': '''help message'''} ) UpperCAmelCase : Optional[str] = None UpperCAmelCase : Optional[List[str]] = list_field(default=[] ) UpperCAmelCase : Optional[List[int]] = list_field(default=[] ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : List[int] = list_field(default=[] ) UpperCAmelCase : List[int] = list_field(default=[1, 2, 3] ) UpperCAmelCase : List[str] = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) UpperCAmelCase : List[float] = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : List[int] = field() UpperCAmelCase : str = field() UpperCAmelCase : BasicEnum = field() def lowerCAmelCase_ ( self : List[Any] ): _A = BasicEnum(self.required_enum ) @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : "BasicEnum" = field() UpperCAmelCase : "Optional[bool]" = None UpperCAmelCase : "str" = field(default='''toto''' , metadata={'''help''': '''help message'''} ) UpperCAmelCase : "List[str]" = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : bool = False UpperCAmelCase : bool = True UpperCAmelCase : bool | None = None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : int | None = None UpperCAmelCase : float | None = field(default=__lowerCAmelCase , metadata={'''help''': '''help message'''} ) UpperCAmelCase : str | None = None UpperCAmelCase : list[str] | None 
= list_field(default=[] ) UpperCAmelCase : list[int] | None = list_field(default=[] ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : argparse.ArgumentParser , _UpperCAmelCase : argparse.ArgumentParser ): self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _A = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'} _A = {k: v for k, v in vars(_UpperCAmelCase ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , _UpperCAmelCase ) and yy.get('choices' , _UpperCAmelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](_UpperCAmelCase ) , yy['type'](_UpperCAmelCase ) ) del xx["type"], yy["type"] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--bar' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--baz' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--flag' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((_A) , ) = parser.parse_args_into_dataclasses(_UpperCAmelCase , look_for_args_file=_UpperCAmelCase ) self.assertFalse(example.flag ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=_UpperCAmelCase ) expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = argparse.ArgumentParser() expected.add_argument('--foo' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' ) expected.add_argument('--baz' , type=_UpperCAmelCase , default=_UpperCAmelCase , const=_UpperCAmelCase , nargs='?' 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=_UpperCAmelCase , dest='baz' ) expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase ) _A = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCAmelCase ) for dataclass_type in dataclass_types: _A = HfArgumentParser(_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) _A = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , baz=_UpperCAmelCase , opt=_UpperCAmelCase ) ) def lowerCAmelCase_ ( self : Optional[int] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _A = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _A = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _A = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _A = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) _A = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowerCAmelCase_ ( self : Dict ): @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Literal["titi", "toto", 42] = "toto" _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) _A = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) _A = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=_UpperCAmelCase ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=_UpperCAmelCase ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual( _UpperCAmelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , 
foo_str=['Hallo', 'Bonjour', 'Hello'] , foo_float=[0.1, 0.2, 0.3] ) , ) _A = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(_UpperCAmelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def lowerCAmelCase_ ( self : List[Any] ): _A = argparse.ArgumentParser() expected.add_argument('--foo' , default=_UpperCAmelCase , type=_UpperCAmelCase ) expected.add_argument('--bar' , default=_UpperCAmelCase , type=_UpperCAmelCase , help='help message' ) expected.add_argument('--baz' , default=_UpperCAmelCase , type=_UpperCAmelCase ) expected.add_argument('--ces' , nargs='+' , default=[] , type=_UpperCAmelCase ) expected.add_argument('--des' , nargs='+' , default=[] , type=_UpperCAmelCase ) _A = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(_UpperCAmelCase ) for dataclass_type in dataclass_types: _A = HfArgumentParser(_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_args([] ) self.assertEqual(_UpperCAmelCase , Namespace(foo=_UpperCAmelCase , bar=_UpperCAmelCase , baz=_UpperCAmelCase , ces=[] , des=[] ) ) _A = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(_UpperCAmelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def lowerCAmelCase_ ( self : List[str] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument('--required_str' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Tuple ): _A = HfArgumentParser(_UpperCAmelCase ) _A = argparse.ArgumentParser() expected.add_argument('--foo' , type=_UpperCAmelCase , required=_UpperCAmelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=_UpperCAmelCase , ) expected.add_argument('--opt' , type=_UpperCAmelCase , default=_UpperCAmelCase ) expected.add_argument('--baz' , default='toto' , type=_UpperCAmelCase , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=_UpperCAmelCase ) self.argparsersEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } _A = parser.parse_dict(_UpperCAmelCase )[0] _A = BasicExample(**_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : str ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 42, } self.assertRaises(_UpperCAmelCase , parser.parse_dict , _UpperCAmelCase , allow_extra_keys=_UpperCAmelCase ) def lowerCAmelCase_ ( self : int ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: _A = os.path.join(_UpperCAmelCase , 'temp_json' ) os.mkdir(_UpperCAmelCase ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_yaml_file(Path(temp_local_path + '.json' ) )[0] _A = 
BasicExample(**_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = HfArgumentParser(_UpperCAmelCase ) _A = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: _A = os.path.join(_UpperCAmelCase , 'temp_yaml' ) os.mkdir(_UpperCAmelCase ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(_UpperCAmelCase , _UpperCAmelCase ) _A = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] _A = BasicExample(**_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = HfArgumentParser(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase )
315
"""simple docstring""" def _snake_case ( _snake_case : int = 10_00 ) -> int: '''simple docstring''' return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) ) if __name__ == "__main__": print(solution())
315
1
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
315
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[str] ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Dict , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=True ): _A = () for resnet, attn in zip(self.resnets , self.attentions ): _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : List[Any] ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=_UpperCAmelCase , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_downsample: _A = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : List[str] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str]=True ): _A = () for resnet in self.resnets: _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) output_states += (hidden_states,) if self.add_downsample: _A = self.downsamplers_a(_UpperCAmelCase ) output_states += (hidden_states,) return hidden_states, output_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels 
+ res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = resnets _A = attentions if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any]=True ): for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : bool = True UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Any ): _A = [] for i in range(self.num_layers ): _A = self.in_channels if (i == self.num_layers - 1) else self.out_channels _A = self.prev_output_channel if i == 0 else self.out_channels _A = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(_UpperCAmelCase ) _A = resnets if self.add_upsample: _A = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int]=True ): for resnet in self.resnets: # pop res hidden states _A = res_hidden_states_tuple[-1] _A = res_hidden_states_tuple[:-1] _A = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) if self.add_upsample: _A = self.upsamplers_a(_UpperCAmelCase ) return hidden_states class lowercase_ ( nn.Module ): '''simple docstring''' UpperCAmelCase : int UpperCAmelCase : float = 0.0 UpperCAmelCase : int = 1 UpperCAmelCase : int = 1 UpperCAmelCase : bool = False UpperCAmelCase : bool = False UpperCAmelCase : jnp.dtype = jnp.floataa def lowerCAmelCase_ ( self : Dict ): # there is always at least one resnet _A = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] _A = [] for _ in range(self.num_layers ): _A = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(_UpperCAmelCase ) _A = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) 
resnets.append(_UpperCAmelCase ) _A = resnets _A = attentions def __call__( self : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int]=True ): _A = self.resnets[0](_UpperCAmelCase , _UpperCAmelCase ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): _A = attn(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) _A = resnet(_UpperCAmelCase , _UpperCAmelCase , deterministic=_UpperCAmelCase ) return hidden_states
315
1
"""simple docstring""" import copy import re class lowercase_ : '''simple docstring''' UpperCAmelCase : Union[str, Any] = '''hp''' UpperCAmelCase : str = {} UpperCAmelCase : List[Any] = None @classmethod def lowerCAmelCase_ ( cls : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ): _A = prefix _A = defaults cls.build_naming_info() @staticmethod def lowerCAmelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : int ): if len(_UpperCAmelCase ) == 0: return "" _A = None if any(char.isdigit() for char in word ): raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(_UpperCAmelCase ) + 1 ): _A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(_UpperCAmelCase : str ): _A = '' while integer != 0: _A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s _A = 0 while True: _A = word + '#' + int_to_alphabetic(_UpperCAmelCase ) if sword in info["reverse_short_word"]: continue else: _A = sword break _A = short_word _A = word return short_word @staticmethod def lowerCAmelCase_ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : int ): _A = param_name.split('_' ) _A = [TrialShortNamer.shortname_for_word(_UpperCAmelCase , _UpperCAmelCase ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _A = ['', '_'] for separator in separators: _A = separator.join(_UpperCAmelCase ) if shortname not in info["reverse_short_param"]: _A = shortname _A = param_name return shortname return param_name @staticmethod def lowerCAmelCase_ ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] ): _A = TrialShortNamer.shortname_for_key(_UpperCAmelCase , _UpperCAmelCase ) _A = short_name _A = param_name @classmethod def lowerCAmelCase_ ( cls : Dict ): if cls.NAMING_INFO is not None: return _A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } _A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(_UpperCAmelCase , _UpperCAmelCase ) _A = info @classmethod def lowerCAmelCase_ ( cls : List[Any] , _UpperCAmelCase : List[Any] ): cls.build_naming_info() assert cls.PREFIX is not None _A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _A = cls.NAMING_INFO['short_param'][k] if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _A = 1 if v else 0 _A = '' if isinstance(_UpperCAmelCase , (int, float) ) else '-' _A = F'''{key}{sep}{v}''' name.append(_UpperCAmelCase ) return "_".join(_UpperCAmelCase ) @classmethod def lowerCAmelCase_ ( cls : Tuple , _UpperCAmelCase : List[Any] ): _A = repr[len(cls.PREFIX ) + 1 :] if repr == "": _A = [] else: _A = repr.split('_' ) _A = {} for value in values: if "-" in value: _A , _A = value.split('-' ) else: _A = re.sub('[0-9.]' , '' , _UpperCAmelCase ) _A = float(re.sub('[^0-9.]' , '' , _UpperCAmelCase ) ) _A = cls.NAMING_INFO['reverse_short_param'][p_k] _A = p_v for k in cls.DEFAULTS: if k not in parameters: _A = cls.DEFAULTS[k] return parameters
315
"""simple docstring""" import numpy class lowercase_ : '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : numpy.ndarray ): _A = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. _A = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. _A = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. _A = numpy.random.rand(3 , 1 ) # Real output values provided. _A = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. _A = numpy.zeros(output_array.shape ) def lowerCAmelCase_ ( self : List[str] ): _A = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def lowerCAmelCase_ ( self : Optional[int] ): _A = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) _A = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) _A = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : numpy.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : bool ): for iteration in range(1 , iterations + 1 ): _A = self.feedforward() self.back_propagation() if give_loss: _A = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F'''Iteration {iteration} Loss: {loss}''' ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : numpy.ndarray ): _A = 
input_arr _A = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) _A = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) _A = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return 1 / (1 + numpy.exp(-value )) def _snake_case ( _snake_case : numpy.ndarray ) -> numpy.ndarray: '''simple docstring''' return (value) * (1 - (value)) def _snake_case ( ) -> int: '''simple docstring''' _A = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. _A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. _A = TwoHiddenLayerNeuralNetwork( input_array=_snake_case , output_array=_snake_case ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=_snake_case , iterations=10 , give_loss=_snake_case ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
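# A quick numerical check of the derivative helper above: it takes an
# already-sigmoided value s and returns s * (1 - s); at s = sigmoid(0) = 0.5
# the derivative is 0.25.
import numpy

s = 1 / (1 + numpy.exp(-0.0))
assert abs(s * (1 - s) - 0.25) < 1e-12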
315
1
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : str=True , _UpperCAmelCase : Dict=99 , _UpperCAmelCase : Optional[Any]=32 , _UpperCAmelCase : List[Any]=5 , _UpperCAmelCase : List[str]=4 , _UpperCAmelCase : str=64 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : Optional[int]=512 , _UpperCAmelCase : Union[str, Any]=16 , _UpperCAmelCase : Optional[Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Any=2 , _UpperCAmelCase : int=2 , _UpperCAmelCase : str=2 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : Dict=1 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_input_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_labels _A = num_choices _A = scope _A = q_groups _A = k_groups _A = v_groups _A = post_attention_groups _A = intermediate_groups _A = output_groups def lowerCAmelCase_ ( self : Optional[int] ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_input_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = None _A = None _A = None if self.use_labels: _A = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _A = ids_tensor([self.batch_size] , self.num_choices ) _A = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self : Union[str, Any] ): return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def 
lowerCAmelCase_ ( self : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] ): _A = SqueezeBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , _UpperCAmelCase ) _A = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] ): _A = SqueezeBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any] ): _A = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ): _A = self.num_labels _A = SqueezeBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] ): _A = self.num_labels _A = SqueezeBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ): _A = self.num_choices _A = SqueezeBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _A = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Any ): _A = self.prepare_config_and_inputs() ((_A) , (_A) , (_A) , (_A) , (_A) , (_A)) = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict 
@require_torch class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Tuple = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCAmelCase : Optional[int] = ( { '''feature-extraction''': SqueezeBertModel, '''fill-mask''': SqueezeBertForMaskedLM, '''question-answering''': SqueezeBertForQuestionAnswering, '''text-classification''': SqueezeBertForSequenceClassification, '''token-classification''': SqueezeBertForTokenClassification, '''zero-shot''': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase : Union[str, Any] = False UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = False def lowerCAmelCase_ ( self : List[Any] ): _A = SqueezeBertModelTester(self ) _A = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 ) def lowerCAmelCase_ ( self : int ): self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : List[str] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[int] ): _A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : int ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = SqueezeBertModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_torch class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) _A = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] ) _A = model(_UpperCAmelCase )[0] _A = torch.Size((1, 3) ) self.assertEqual(output.shape , _UpperCAmelCase ) _A = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
315
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar a = TypeVar('''T''') class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , _UpperCAmelCase : T ): _A = data _A = None def __str__( self : str ): return F'''{self.data}''' class lowercase_ ( Generic[T] ): '''simple docstring''' def __init__( self : Tuple ): _A = None def __iter__( self : List[Any] ): _A = self.top while node: yield node.data _A = node.next def __str__( self : Union[str, Any] ): return "->".join([str(_UpperCAmelCase ) for item in self] ) def __len__( self : List[Any] ): return len(tuple(iter(self ) ) ) def lowerCAmelCase_ ( self : str ): return self.top is None def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : T ): _A = Node(_UpperCAmelCase ) if not self.is_empty(): _A = self.top _A = node def lowerCAmelCase_ ( self : Dict ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _UpperCAmelCase ) _A = self.top _A = self.top.next return pop_node.data def lowerCAmelCase_ ( self : Tuple ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def lowerCAmelCase_ ( self : Optional[Any] ): _A = None if __name__ == "__main__": from doctest import testmod testmod()
315
1
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple=13 , _UpperCAmelCase : Tuple=7 , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Dict=True , _UpperCAmelCase : str=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Union[str, Any]=99 , _UpperCAmelCase : Any=32 , _UpperCAmelCase : Union[str, Any]=5 , _UpperCAmelCase : int=4 , _UpperCAmelCase : str=37 , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Union[str, Any]=0.1 , _UpperCAmelCase : Tuple=512 , _UpperCAmelCase : Dict=16 , _UpperCAmelCase : Any=2 , _UpperCAmelCase : Optional[int]=0.02 , _UpperCAmelCase : List[str]=4 , ): _A = parent _A = batch_size _A = seq_length _A = is_training _A = use_attention_mask _A = use_token_type_ids _A = use_labels _A = vocab_size _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = max_position_embeddings _A = type_vocab_size _A = type_sequence_label_size _A = initializer_range _A = num_choices def lowerCAmelCase_ ( self : Any ): _A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A = None if self.use_attention_mask: _A = random_attention_mask([self.batch_size, self.seq_length] ) _A = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_UpperCAmelCase , ) return config, input_ids, attention_mask def lowerCAmelCase_ ( self : Optional[int] ): _A = self.prepare_config_and_inputs() _A , _A , _A = config_and_inputs _A = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class lowercase_ ( __lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCAmelCase : Dict = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCAmelCase_ ( self : Optional[int] ): _A = FlaxDistilBertModelTester(self ) @slow def lowerCAmelCase_ ( self : List[Any] ): for model_class_name in self.all_model_classes: _A = model_class_name.from_pretrained('distilbert-base-uncased' ) _A = model(np.ones((1, 1) ) ) self.assertIsNotNone(_UpperCAmelCase ) @require_flax class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : List[Any] ): _A = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' ) _A = np.array([[0, 345, 
232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) _A = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) _A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] _A = (1, 11, 768) self.assertEqual(output.shape , _UpperCAmelCase ) _A = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1E-4 ) )
315
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ): warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.' , _UpperCAmelCase , ) super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
315
1
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) a = logging.get_logger(__name__) a = OrderedDict( [ ('''align''', '''EfficientNetImageProcessor'''), ('''beit''', '''BeitImageProcessor'''), ('''bit''', '''BitImageProcessor'''), ('''blip''', '''BlipImageProcessor'''), ('''blip-2''', '''BlipImageProcessor'''), ('''bridgetower''', '''BridgeTowerImageProcessor'''), ('''chinese_clip''', '''ChineseCLIPImageProcessor'''), ('''clip''', '''CLIPImageProcessor'''), ('''clipseg''', '''ViTImageProcessor'''), ('''conditional_detr''', '''ConditionalDetrImageProcessor'''), ('''convnext''', '''ConvNextImageProcessor'''), ('''convnextv2''', '''ConvNextImageProcessor'''), ('''cvt''', '''ConvNextImageProcessor'''), ('''data2vec-vision''', '''BeitImageProcessor'''), ('''deformable_detr''', '''DeformableDetrImageProcessor'''), ('''deit''', '''DeiTImageProcessor'''), ('''deta''', '''DetaImageProcessor'''), ('''detr''', '''DetrImageProcessor'''), ('''dinat''', '''ViTImageProcessor'''), ('''donut-swin''', '''DonutImageProcessor'''), ('''dpt''', '''DPTImageProcessor'''), ('''efficientformer''', '''EfficientFormerImageProcessor'''), ('''efficientnet''', '''EfficientNetImageProcessor'''), ('''flava''', '''FlavaImageProcessor'''), ('''focalnet''', '''BitImageProcessor'''), ('''git''', '''CLIPImageProcessor'''), ('''glpn''', '''GLPNImageProcessor'''), ('''groupvit''', '''CLIPImageProcessor'''), ('''imagegpt''', '''ImageGPTImageProcessor'''), ('''instructblip''', '''BlipImageProcessor'''), ('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''), ('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''), ('''levit''', '''LevitImageProcessor'''), ('''mask2former''', '''Mask2FormerImageProcessor'''), ('''maskformer''', '''MaskFormerImageProcessor'''), ('''mgp-str''', '''ViTImageProcessor'''), ('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''), ('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevit''', '''MobileViTImageProcessor'''), ('''mobilevitv2''', '''MobileViTImageProcessor'''), ('''nat''', '''ViTImageProcessor'''), ('''oneformer''', '''OneFormerImageProcessor'''), ('''owlvit''', '''OwlViTImageProcessor'''), ('''perceiver''', '''PerceiverImageProcessor'''), ('''pix2struct''', '''Pix2StructImageProcessor'''), ('''poolformer''', '''PoolFormerImageProcessor'''), ('''regnet''', '''ConvNextImageProcessor'''), ('''resnet''', '''ConvNextImageProcessor'''), ('''sam''', '''SamImageProcessor'''), ('''segformer''', '''SegformerImageProcessor'''), ('''swiftformer''', '''ViTImageProcessor'''), ('''swin''', '''ViTImageProcessor'''), ('''swin2sr''', '''Swin2SRImageProcessor'''), ('''swinv2''', '''ViTImageProcessor'''), ('''table-transformer''', '''DetrImageProcessor'''), ('''timesformer''', '''VideoMAEImageProcessor'''), ('''tvlt''', '''TvltImageProcessor'''), ('''upernet''', '''SegformerImageProcessor'''), ('''van''', '''ConvNextImageProcessor'''), ('''videomae''', '''VideoMAEImageProcessor'''), 
('''vilt''', '''ViltImageProcessor'''), ('''vit''', '''ViTImageProcessor'''), ('''vit_hybrid''', '''ViTHybridImageProcessor'''), ('''vit_mae''', '''ViTImageProcessor'''), ('''vit_msn''', '''ViTImageProcessor'''), ('''xclip''', '''CLIPImageProcessor'''), ('''yolos''', '''YolosImageProcessor'''), ] ) a = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def _snake_case ( _snake_case : str ) -> Optional[Any]: '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: _A = model_type_to_module_name(_snake_case ) _A = importlib.import_module(F'''.{module_name}''' , 'transformers.models' ) try: return getattr(_snake_case , _snake_case ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(_snake_case , '__name__' , _snake_case ) == class_name: return extractor # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _A = importlib.import_module('transformers' ) if hasattr(_snake_case , _snake_case ): return getattr(_snake_case , _snake_case ) return None def _snake_case ( _snake_case : Union[str, os.PathLike] , _snake_case : Optional[Union[str, os.PathLike]] = None , _snake_case : bool = False , _snake_case : bool = False , _snake_case : Optional[Dict[str, str]] = None , _snake_case : Optional[Union[bool, str]] = None , _snake_case : Optional[str] = None , _snake_case : bool = False , **_snake_case : Tuple , ) -> Union[str, Any]: '''simple docstring''' _A = get_file_from_repo( _snake_case , _snake_case , cache_dir=_snake_case , force_download=_snake_case , resume_download=_snake_case , proxies=_snake_case , use_auth_token=_snake_case , revision=_snake_case , local_files_only=_snake_case , ) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(_snake_case , encoding='utf-8' ) as reader: return json.load(_snake_case ) class lowercase_ : '''simple docstring''' def __init__( self : List[Any] ): raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(_UpperCAmelCase ) def lowerCAmelCase_ ( cls : Any , _UpperCAmelCase : str , **_UpperCAmelCase : Any ): _A = kwargs.pop('config' , _UpperCAmelCase ) _A = kwargs.pop('trust_remote_code' , _UpperCAmelCase ) _A = True _A , _A = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase , **_UpperCAmelCase ) _A = config_dict.get('image_processor_type' , _UpperCAmelCase ) _A = None if "AutoImageProcessor" in config_dict.get('auto_map' , {} ): _A = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: _A = config_dict.pop('feature_extractor_type' , _UpperCAmelCase ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.'
) _A = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): _A = config_dict['auto_map']['AutoFeatureExtractor'] _A = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): _A = AutoConfig.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase ) # It could be in `config.image_processor_type`` _A = getattr(_UpperCAmelCase , 'image_processor_type' , _UpperCAmelCase ) if hasattr(_UpperCAmelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map: _A = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: _A = image_processor_class_from_name(_UpperCAmelCase ) _A = image_processor_auto_map is not None _A = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING _A = resolve_trust_remote_code( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if has_remote_code and trust_remote_code: _A = get_class_from_dynamic_module( _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ) _A = kwargs.pop('code_revision' , _UpperCAmelCase ) if os.path.isdir(_UpperCAmelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING: _A = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )] return image_processor_class.from_dict(_UpperCAmelCase , **_UpperCAmelCase ) raise ValueError( F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowerCAmelCase_ ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] ): IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase , _UpperCAmelCase )
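The class-resolution helper in this sample follows a general registry pattern: scan a name-to-class mapping, lazily import the module that should declare the class, and return None on a miss so the caller can fall back. A self-contained sketch of that pattern; the registry below uses stdlib stand-ins, not the real transformers mapping:

import importlib

# Toy registry in the spirit of IMAGE_PROCESSOR_MAPPING_NAMES above,
# using standard-library modules so the sketch runs anywhere.
REGISTRY = {"json": "JSONDecoder", "decimal": "Decimal"}

def class_from_name(class_name: str):
    # Find which registered module declares `class_name`, import it
    # lazily, and return the class (or None when nothing matches).
    for module_name, exported_name in REGISTRY.items():
        if exported_name == class_name:
            module = importlib.import_module(module_name)
            return getattr(module, class_name, None)
    return None

print(class_from_name("Decimal"))  # <class 'decimal.Decimal'>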
315
"""simple docstring""" from __future__ import annotations import collections import pprint from pathlib import Path def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(sorted(_snake_case ) ) def _snake_case ( _snake_case : str ) -> list[str]: '''simple docstring''' return word_by_signature[signature(_snake_case )] a = Path(__file__).parent.joinpath('''words.txt''').read_text(encoding='''utf-8''') a = sorted({word.strip().lower() for word in data.splitlines()}) a = collections.defaultdict(list) for word in word_list: word_by_signature[signature(word)].append(word) if __name__ == "__main__": a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open('''anagrams.txt''', '''w''') as file: file.write('''all_anagrams = \n ''') file.write(pprint.pformat(all_anagrams))
315
1
"""simple docstring""" import datasets from .evaluate import evaluate a = '''\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ''' a = ''' This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ''' a = ''' Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': the text of the answer references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the SQuAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}] >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}] >>> squad_metric = datasets.load_metric("squad") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )}, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , ) def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ): _A = {prediction['id']: prediction['prediction_text'] for prediction in predictions} _A = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] _A = evaluate(dataset=_UpperCAmelCase , predictions=_UpperCAmelCase ) return score
315
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version a = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : Optional[str] = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''The column name of the images in the files.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the training data.'''} ) UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''A folder containing the validation data.'''} ) UpperCAmelCase : Optional[float] = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) UpperCAmelCase : Optional[int] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCAmelCase_ ( self : Dict ): _A = {} if self.train_dir is not None: _A = self.train_dir if self.validation_dir is not None: _A = self.validation_dir _A = data_files if data_files else None @dataclass class lowercase_ : '''simple docstring''' UpperCAmelCase : str = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. 
Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCAmelCase : Optional[str] = field( default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) UpperCAmelCase : str = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCAmelCase : str = field(default=__lowerCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) UpperCAmelCase : float = field( default=0.75 , metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) UpperCAmelCase : bool = field( default=__lowerCAmelCase , metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : float = field( default=1E-3 , metadata={'''help''': '''Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'''} ) def _snake_case ( _snake_case : int ) -> Optional[int]: '''simple docstring''' _A = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ) -> List[str]: '''simple docstring''' _A = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _A , _A , _A = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _A = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. _A = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _A = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. 
''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. _A = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _A = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: _A = ds['train'].train_test_split(data_args.train_val_split ) _A = split['train'] _A = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _A = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: _A = ViTMAEConfig.from_pretrained(model_args.config_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _A = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: _A = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: _A = ViTImageProcessor() # create model if model_args.model_name_or_path: _A = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) _A = ViTMAEForPreTraining(_snake_case ) if training_args.do_train: _A = ds['train'].column_names else: _A = ds['validation'].column_names if data_args.image_column_name is not None: _A = data_args.image_column_name elif "image" in column_names: _A = 'image' elif "img" in column_names: _A = 'img' else: _A = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _A = image_processor.size['shortest_edge'] else: _A = (image_processor.size['height'], image_processor.size['width']) _A = Compose( [ Lambda(lambda _snake_case : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(_snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_snake_case : List[Any] ): _A = 
[transforms(_snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: _A = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: _A = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Compute absolute learning rate _A = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _A = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer _A = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: _A = None if training_args.resume_from_checkpoint is not None: _A = training_args.resume_from_checkpoint elif last_checkpoint is not None: _A = last_checkpoint _A = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _A = trainer.evaluate() trainer.log_metrics('eval' , _snake_case ) trainer.save_metrics('eval' , _snake_case ) # Write model card and (optionally) push to hub _A = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) def _snake_case ( _snake_case : List[str] ) -> Optional[Any]: '''simple docstring''' main() if __name__ == "__main__": main()
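A detail worth pulling out of the training script above: when --base_learning_rate is given, the absolute rate follows the linear scaling rule absolute_lr = base_lr * total_batch_size / 256, where the total batch size multiplies the per-device batch, gradient accumulation steps and world size. A small illustration (the helper name is ours, not the script's):

def absolute_lr(base_lr: float, per_device_batch: int, grad_accum: int, world_size: int) -> float:
    # Linear scaling rule: 256 is the reference batch size.
    total_batch_size = per_device_batch * grad_accum * world_size
    return base_lr * total_batch_size / 256

# The script's default base_lr is 1e-3; with an effective batch of 4096:
print(absolute_lr(1e-3, 64, 8, 8))  # 0.016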
315
1
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def _snake_case ( ) -> tuple[list[int], int]: '''simple docstring''' _A = [randint(-10_00 , 10_00 ) for i in range(10 )] _A = randint(-50_00 , 50_00 ) return (arr, r) a = make_dataset() def _snake_case ( _snake_case : list[int] , _snake_case : int ) -> tuple[int, ...]: '''simple docstring''' for triplet in permutations(_snake_case , 3 ): if sum(_snake_case ) == target: return tuple(sorted(_snake_case ) ) return (0, 0, 0) def _snake_case ( _snake_case : list[int] , _snake_case : int ) -> tuple[int, int, int]: '''simple docstring''' arr.sort() _A = len(_snake_case ) for i in range(n - 1 ): _A , _A = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def _snake_case ( ) -> tuple[float, float]: '''simple docstring''' _A = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n' _A = '\ntriplet_sum1(*dataset)\n' _A = '\ntriplet_sum2(*dataset)\n' _A = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=1_00_00 ) _A = repeat(setup=_snake_case , stmt=_snake_case , repeat=5 , number=1_00_00 ) return (min(_snake_case ), min(_snake_case )) if __name__ == "__main__": from doctest import testmod testmod() a = solution_times() print(F'''The time for naive implementation is {times[0]}.''') print(F'''The time for optimized implementation is {times[1]}.''')
315
"""simple docstring""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor a = logging.getLogger(__name__) a = 50 # max width of layer names a = 70 # max width of quantizer names def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' ) , ) def _snake_case ( _snake_case : Dict ) -> Optional[Any]: '''simple docstring''' if args.calibrator == "max": _A = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) _A = 'histogram' elif args.calibrator == "mse": _A = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) _A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case ) _A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]: '''simple docstring''' logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case ) if args.quant_disable: set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case ) if args.quant_disable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' 
+ args.quant_disable_layer_module] , _disabled=_snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case ) if args.recalibrate_weights: recalibrate_weights(_snake_case ) if args.fuse_qkv: fuse_qkv(_snake_case , _snake_case ) if args.clip_gelu: clip_gelu(_snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str ) -> Any: '''simple docstring''' logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str: '''simple docstring''' logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(_snake_case ) def _snake_case ( _snake_case : str , _snake_case : int ) -> str: '''simple docstring''' def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ): for mod in [qq, qk, qv]: if not hasattr(_snake_case , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return _A = qq._amax.detach().item() _A = qk._amax.detach().item() _A = qv._amax.detach().item() _A = max(_snake_case , _snake_case , _snake_case ) qq._amax.fill_(_snake_case ) qk._amax.fill_(_snake_case ) qv._amax.fill_(_snake_case ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): _A = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case ) _A = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _snake_case ( _snake_case : List[str] ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: _A = mod.weight.shape[0] _A = mod._weight_quantizer._amax.detach() _A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _snake_case ( _snake_case : Dict ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) _A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _A = set(range(len(mod.weight.size() ) ) ) - axis_set _A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _A = amax def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: _A = [] elif not isinstance(_snake_case , _snake_case ): _A = [ignore] _A = 0 for name, mod in model.named_modules(): if not hasattr(_snake_case , 'weight' ): continue _A = max(_snake_case , len(_snake_case ) ) for name, mod in model.named_modules(): _A = getattr(_snake_case , '_input_quantizer' , _snake_case ) _A = getattr(_snake_case , '_weight_quantizer' , _snake_case ) if not hasattr(_snake_case , 'weight' ): continue if type(_snake_case ) in ignore: continue if [True for s in ignore if type(_snake_case ) is str and s in name]: continue _A = F'''Act:{input_q.extra_repr()}''' _A = F'''Wgt:{weight_q.extra_repr()}''' _A = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(_snake_case ) <= line_width: logger.info(_snake_case ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' _A = 0 for name, mod in model.named_modules(): if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int: '''simple docstring''' _A = getattr(_snake_case , _snake_case , _snake_case ) if quantizer_mod is not None: assert hasattr(_snake_case , _snake_case ) setattr(_snake_case , _snake_case , _snake_case ) else: logger.warning(F'''{name} has no {quantizer}''' ) def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str: '''simple docstring''' _A = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case ) if which in ["weight", "both"]: set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case ) logger.info(_snake_case ) def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): set_quantizers(_snake_case , _snake_case , **_snake_case ) elif name.endswith('_quantizer' ): for n in names: if re.search(_snake_case , _snake_case ): _A = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(_snake_case , _snake_case , _snake_case ) logger.info(_snake_case )
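The fuse_qkv step in this sample exists because fused attention kernels need the query, key and value inputs to share one quantization scale, so the code takes the maximum of the three calibrated amax ranges and writes it back to all three quantizers. The same arithmetic in miniature, with plain floats standing in for the _amax buffers:

# Toy illustration of the range fusion performed above.
q_amax, k_amax, v_amax = 4.70, 5.20, 3.90
fused = max(q_amax, k_amax, v_amax)   # the widest range wins
q_amax = k_amax = v_amax = fused      # all three quantizers now agree
print(f" q={q_amax:5.2f} k={k_amax:5.2f} v={v_amax:5.2f} -> {fused:5.2f}")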
315
1
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup a = logging.get_logger(__name__) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def __init__( self : Any , **_UpperCAmelCase : int ): requires_backends(self , ['bs4'] ) super().__init__(**_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ): _A = [] _A = [] _A = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag _A = parent.find_all(child.name , recursive=_UpperCAmelCase ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_UpperCAmelCase ) else next(i for i, s in enumerate(_UpperCAmelCase , 1 ) if s is child ) ) _A = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[int] ): _A = BeautifulSoup(_UpperCAmelCase , 'html.parser' ) _A = [] _A = [] _A = [] for element in html_code.descendants: if type(_UpperCAmelCase ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue _A = html.unescape(_UpperCAmelCase ).strip() if not text_in_this_tag: continue all_doc_strings.append(_UpperCAmelCase ) _A , _A = self.xpath_soup(_UpperCAmelCase ) stringaxtag_seq.append(_UpperCAmelCase ) stringaxsubs_seq.append(_UpperCAmelCase ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError('Number of doc strings and xtags does not correspond' ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError('Number of doc strings and xsubs does not correspond' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple ): _A = '' for tagname, subs in zip(_UpperCAmelCase , _UpperCAmelCase ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : int , _UpperCAmelCase : Optional[Any] ): _A = False # Check that strings has a valid type if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _A = True elif isinstance(_UpperCAmelCase , (list, tuple) ): if len(_UpperCAmelCase ) == 0 or isinstance(html_strings[0] , _UpperCAmelCase ): _A = True if not valid_strings: raise ValueError( 'HTML strings must of type `str`, `List[str]` (batch of examples), ' F'''but is of type {type(_UpperCAmelCase )}.''' ) _A = bool(isinstance(_UpperCAmelCase , (list, tuple) ) and (isinstance(html_strings[0] , _UpperCAmelCase )) ) if not is_batched: _A = [html_strings] # Get nodes + xpaths _A = [] _A = [] for html_string in html_strings: _A , _A , _A = self.get_three_from_single(_UpperCAmelCase ) nodes.append(_UpperCAmelCase ) _A = [] for node, tag_list, sub_list in zip(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): _A = self.construct_xpath(_UpperCAmelCase , _UpperCAmelCase ) xpath_strings.append(_UpperCAmelCase ) xpaths.append(_UpperCAmelCase ) # return as Dict _A = {'nodes': nodes, 'xpaths': xpaths} _A = BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase ) return encoded_inputs
315
"""simple docstring""" from scipy.stats import spearmanr import datasets a = ''' The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. ''' a = ''' Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {\'spearmanr\': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric("spearmanr") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results[\'spearmanr\']) -0.7 >>> print(round(results[\'spearmanr_pvalue\'], 2)) 0.19 ''' a = r'''\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int]=False ): _A = spearmanr(_UpperCAmelCase , _UpperCAmelCase ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
"""simple docstring""" import numpy as np from transformers import Pipeline def _snake_case ( _snake_case : Optional[int] ) -> str: '''simple docstring''' _A = np.max(_snake_case , axis=-1 , keepdims=_snake_case ) _A = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_snake_case ) class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Tuple , **_UpperCAmelCase : Any ): _A = {} if "second_text" in kwargs: _A = kwargs['second_text'] return preprocess_kwargs, {}, {} def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int=None ): return self.tokenizer(_UpperCAmelCase , text_pair=_UpperCAmelCase , return_tensors=self.framework ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Union[str, Any] ): return self.model(**_UpperCAmelCase ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[int] ): _A = model_outputs.logits[0].numpy() _A = softmax(_UpperCAmelCase ) _A = np.argmax(_UpperCAmelCase ) _A = self.model.config.idalabel[best_class] _A = probabilities[best_class].item() _A = logits.tolist() return {"label": label, "score": score, "logits": logits}
"""simple docstring""" from collections.abc import Callable def _snake_case ( _snake_case : Callable[[float], float] , _snake_case : float , _snake_case : float ) -> float: '''simple docstring''' _A = a _A = b if function(_snake_case ) == 0: # one of the a or b is a root for the function return a elif function(_snake_case ) == 0: return b elif ( function(_snake_case ) * function(_snake_case ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('could not find root in given interval.' ) else: _A = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(_snake_case ) == 0: return mid elif function(_snake_case ) * function(_snake_case ) < 0: _A = mid else: _A = mid _A = start + (end - start) / 2.0 return mid def _snake_case ( _snake_case : float ) -> float: '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' return "".join(chr(ord(_snake_case ) - 32 ) if 'a' <= char <= 'z' else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowercase_ ( __lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : int = ['''image_processor''', '''tokenizer'''] UpperCAmelCase : Union[str, Any] = '''ViltImageProcessor''' UpperCAmelCase : Optional[int] = ('''BertTokenizer''', '''BertTokenizerFast''') def __init__( self : Any , _UpperCAmelCase : str=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Union[str, Any] ): _A = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _UpperCAmelCase , ) _A = kwargs.pop('feature_extractor' ) _A = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_UpperCAmelCase , _UpperCAmelCase ) _A = self.image_processor def __call__( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , _UpperCAmelCase : bool = True , _UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , _UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True , _UpperCAmelCase : Optional[Union[str, TensorType]] = None , **_UpperCAmelCase : List[Any] , ): _A = self.tokenizer( text=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase , stride=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , return_offsets_mapping=_UpperCAmelCase , return_length=_UpperCAmelCase , verbose=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , ) # add pixel_values + pixel_mask _A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase ) encoding.update(_UpperCAmelCase ) return encoding def lowerCAmelCase_ ( self : List[Any] , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Union[str, Any] ): return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase ) def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[Any] ): return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase ) @property def lowerCAmelCase_ ( self : Tuple ): _A = self.tokenizer.model_input_names _A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def lowerCAmelCase_ ( self : Dict ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' 
, _UpperCAmelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self : Tuple ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , ) return self.image_processor
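# --- Usage sketch (not part of the original file) -----------------------------
# How ViltProcessor is typically driven. The checkpoint name and image path
# are illustrative assumptions, not something this module prescribes.
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.open("example.jpg")  # assumed local image file
encoding = processor(image, "How many cats are there?", return_tensors="pt")
print(list(encoding.keys()))  # tokenizer outputs plus pixel_values / pixel_mask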
"""simple docstring""" from ...utils import is_note_seq_available, is_transformers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .notes_encoder import SpectrogramNotesEncoder from .continous_encoder import SpectrogramContEncoder from .pipeline_spectrogram_diffusion import ( SpectrogramContEncoder, SpectrogramDiffusionPipeline, TaFilmDecoder, ) try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .midi_utils import MidiProcessor
"""simple docstring""" def _snake_case ( _snake_case : list , _snake_case : int = 0 ) -> list: '''simple docstring''' _A = length or len(_snake_case ) _A = False for i in range(length - 1 ): if list_data[i] > list_data[i + 1]: _A , _A = list_data[i + 1], list_data[i] _A = True return list_data if not swapped else bubble_sort(_snake_case , length - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : tuple[int, int] , _snake_case : int ) -> list[tuple[int, int]]: '''simple docstring''' _A , _A = position _A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _A = [] for position in positions: _A , _A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_snake_case ) return permissible_positions def _snake_case ( _snake_case : list[list[int]] ) -> bool: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def _snake_case ( _snake_case : list[list[int]] , _snake_case : tuple[int, int] , _snake_case : int ) -> bool: '''simple docstring''' if is_complete(_snake_case ): return True for position in get_valid_pos(_snake_case , len(_snake_case ) ): _A , _A = position if board[y][x] == 0: _A = curr + 1 if open_knight_tour_helper(_snake_case , _snake_case , curr + 1 ): return True _A = 0 return False def _snake_case ( _snake_case : int ) -> list[list[int]]: '''simple docstring''' _A = [[0 for i in range(_snake_case )] for j in range(_snake_case )] for i in range(_snake_case ): for j in range(_snake_case ): _A = 1 if open_knight_tour_helper(_snake_case , (i, j) , 1 ): return board _A = 0 _A = F'''Open Kight Tour cannot be performed on a board of size {n}''' raise ValueError(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()