Dataset schema (one row = five fields, in this order):

    code                     string   87 – 55.2k characters
    code_codestyle           int64    0 – 349
    style_context            string   135 – 49.1k characters
    style_context_codestyle  int64    0 – 349
    label                    int64    0 – 1
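A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the hub id "user/codestyle-pairs" is a placeholder for illustration, not the actual dataset name.

from datasets import load_dataset

# "user/codestyle-pairs" is a hypothetical hub id; substitute the real one.
ds = load_dataset("user/codestyle-pairs", split="train")
row = ds[0]
print(row["code"][:200])               # string, 87 to 55.2k characters
print(row["code_codestyle"])           # int64 style id in [0, 349]
print(row["style_context"][:200])      # string, 135 to 49.1k characters
print(row["style_context_codestyle"])  # int64 style id in [0, 349]
print(row["label"])                    # int64, 0 or 1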
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def a__ ( _UpperCamelCase : int ): __lowerCamelCase = 3_84 __lowerCamelCase = 7 if "tiny" in model_name: __lowerCamelCase = 96 __lowerCamelCase = (2, 2, 6, 2) __lowerCamelCase = (3, 6, 12, 24) elif "small" in model_name: __lowerCamelCase = 96 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (3, 6, 12, 24) elif "base" in model_name: __lowerCamelCase = 1_28 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (4, 8, 16, 32) __lowerCamelCase = 12 __lowerCamelCase = 5_12 elif "large" in model_name: __lowerCamelCase = 1_92 __lowerCamelCase = (2, 2, 18, 2) __lowerCamelCase = (6, 12, 24, 48) __lowerCamelCase = 12 __lowerCamelCase = 7_68 # set label information __lowerCamelCase = 1_50 __lowerCamelCase = 'huggingface/label-files' __lowerCamelCase = 'ade20k-id2label.json' __lowerCamelCase = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) ) __lowerCamelCase = {int(__a ): v for k, v in idalabel.items()} __lowerCamelCase = {v: k for k, v in idalabel.items()} __lowerCamelCase = SwinConfig( embed_dim=__a ,depths=__a ,num_heads=__a ,window_size=__a ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ,) __lowerCamelCase = UperNetConfig( backbone_config=__a ,auxiliary_in_channels=__a ,num_labels=__a ,idalabel=__a ,labelaid=__a ,) return config def a__ ( _UpperCamelCase : Tuple ): __lowerCamelCase = [] # fmt: off # stem rename_keys.append(('''backbone.patch_embed.projection.weight''', '''backbone.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.projection.bias''', '''backbone.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''backbone.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''backbone.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", 
F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def a__ ( _UpperCamelCase : Optional[int] ,_UpperCamelCase : Tuple ,_UpperCamelCase : List[Any] ): __lowerCamelCase = dct.pop(__a ) __lowerCamelCase = val def a__ ( _UpperCamelCase : str ,_UpperCamelCase : List[Any] ): __lowerCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __lowerCamelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __lowerCamelCase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" ) __lowerCamelCase = state_dict.pop(F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase = in_proj_weight[:dim, :] __lowerCamelCase = in_proj_bias[: dim] __lowerCamelCase = in_proj_weight[ dim : dim * 2, : ] __lowerCamelCase = in_proj_bias[ dim : dim * 2 ] __lowerCamelCase = in_proj_weight[ -dim :, : ] __lowerCamelCase = in_proj_bias[-dim :] # fmt: on def a__ ( _UpperCamelCase : Union[str, Any] ): __lowerCamelCase = x.shape __lowerCamelCase = x.reshape(__a ,4 ,in_channel // 4 ) __lowerCamelCase = x[:, [0, 2, 1, 3], :].transpose(1 ,2 ).reshape(__a ,__a ) return x def a__ ( _UpperCamelCase : Dict ): __lowerCamelCase = x.shape __lowerCamelCase = x.reshape(__a ,in_channel // 4 ,4 ) __lowerCamelCase = x[:, :, [0, 2, 1, 3]].transpose(1 ,2 ).reshape(__a ,__a ) return x def a__ ( _UpperCamelCase : int ): __lowerCamelCase = x.shape[0] __lowerCamelCase = x.reshape(4 ,in_channel // 4 ) __lowerCamelCase = x[[0, 2, 1, 3], :].transpose(0 ,1 ).reshape(__a ) return x def a__ ( _UpperCamelCase : Optional[Any] ): __lowerCamelCase = x.shape[0] __lowerCamelCase = x.reshape(in_channel // 4 ,4 ) __lowerCamelCase = x[:, [0, 2, 1, 3]].transpose(0 ,1 ).reshape(__a ) return x def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : int ,_UpperCamelCase : str ): __lowerCamelCase = { 
'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } __lowerCamelCase = model_name_to_url[model_name] __lowerCamelCase = torch.hub.load_state_dict_from_url(__a ,map_location='''cpu''' ,file_name=__a )[ 'state_dict' ] for name, param in state_dict.items(): print(__a ,param.shape ) __lowerCamelCase = get_upernet_config(__a ) __lowerCamelCase = UperNetForSemanticSegmentation(__a ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): __lowerCamelCase = state_dict.pop(__a ) if "bn" in key: __lowerCamelCase = key.replace('''bn''' ,'''batch_norm''' ) __lowerCamelCase = val # rename keys __lowerCamelCase = create_rename_keys(__a ) for src, dest in rename_keys: rename_key(__a ,__a ,__a ) read_in_q_k_v(__a ,config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: __lowerCamelCase = reverse_correct_unfold_reduction_order(__a ) if "norm" in key: __lowerCamelCase = reverse_correct_unfold_norm_order(__a ) model.load_state_dict(__a ) # verify on image __lowerCamelCase = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' __lowerCamelCase = Image.open(requests.get(__a ,stream=__a ).raw ).convert('''RGB''' ) __lowerCamelCase = SegformerImageProcessor() __lowerCamelCase = processor(__a ,return_tensors='''pt''' ).pixel_values with torch.no_grad(): __lowerCamelCase = model(__a ) __lowerCamelCase = outputs.logits print(logits.shape ) print('''First values of logits:''' ,logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": __lowerCamelCase = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": __lowerCamelCase = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": __lowerCamelCase = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": __lowerCamelCase = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print('''Logits:''' ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,__a ,atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__a ) print(F"""Saving processor 
to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__a ) if push_to_hub: print(F"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(F"""openmmlab/{model_name}""" ) processor.push_to_hub(F"""openmmlab/{model_name}""" ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""upernet-swin-tiny""", type=str, choices=[f"upernet-swin-{size}" for size in ["""tiny""", """small""", """base""", """large"""]], help="""Name of the Swin + UperNet model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) a_ = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 330
from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE_ : def __init__( self : List[Any] , _A : Optional[Any] , _A : Dict=13 , _A : Union[str, Any]=30 , _A : Tuple=2 , _A : Union[str, Any]=3 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : str=32 , _A : int=2 , _A : List[str]=4 , _A : List[str]=37 , _A : Tuple="gelu" , _A : Dict=0.1 , _A : Optional[Any]=0.1 , _A : Optional[int]=10 , _A : Optional[int]=0.0_2 , _A : Optional[Any]=3 , _A : str=0.6 , _A : Union[str, Any]=None , ) -> Any: """simple docstring""" snake_case_ : Optional[int] = parent snake_case_ : Tuple = batch_size snake_case_ : List[Any] = image_size snake_case_ : List[str] = patch_size snake_case_ : List[str] = num_channels snake_case_ : Optional[Any] = is_training snake_case_ : Any = use_labels snake_case_ : Tuple = hidden_size snake_case_ : Union[str, Any] = num_hidden_layers snake_case_ : List[Any] = num_attention_heads snake_case_ : Optional[Any] = intermediate_size snake_case_ : List[Any] = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Any = attention_probs_dropout_prob snake_case_ : Tuple = type_sequence_label_size snake_case_ : List[str] = initializer_range snake_case_ : Optional[Any] = mask_ratio snake_case_ : Any = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ : Optional[int] = (image_size // patch_size) ** 2 snake_case_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: """simple docstring""" snake_case_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Union[str, Any] = None if self.use_labels: snake_case_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : Union[str, Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self : int ) -> Optional[Any]: """simple docstring""" return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def UpperCAmelCase_ ( self : List[Any] , _A : int , _A : Dict , _A : 
str ) -> Dict: """simple docstring""" snake_case_ : Union[str, Any] = TFViTMAEModel(config=_A ) snake_case_ : str = model(_A , training=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase_ ( self : Dict , _A : Dict , _A : Any , _A : List[Any] ) -> int: """simple docstring""" snake_case_ : Any = TFViTMAEForPreTraining(_A ) snake_case_ : Optional[Any] = model(_A , training=_A ) # expected sequence length = num_patches snake_case_ : List[str] = (self.image_size // self.patch_size) ** 2 snake_case_ : Optional[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images snake_case_ : str = 1 snake_case_ : Dict = TFViTMAEForPreTraining(_A ) snake_case_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case_ : List[str] = model(_A , training=_A ) snake_case_ : Optional[Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" snake_case_ : List[Any] = self.prepare_config_and_inputs() ((snake_case_) ,(snake_case_) ,(snake_case_)) : Any = config_and_inputs snake_case_ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE_ ( snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__: List[str] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () __magic_name__: str = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} __magic_name__: Dict = False __magic_name__: Dict = False __magic_name__: List[Any] = False __magic_name__: Dict = False def UpperCAmelCase_ ( self : Any ) -> List[Any]: """simple docstring""" snake_case_ : List[Any] = TFViTMAEModelTester(self ) snake_case_ : Tuple = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase_ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" snake_case_ ,snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[Any] = model_class(_A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case_ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Layer ) ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: """simple docstring""" snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : List[str] = model_class(_A ) snake_case_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : Dict = [*signature.parameters.keys()] snake_case_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase_ ( self : List[Any] ) -> List[str]: 
"""simple docstring""" snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A ) def UpperCAmelCase_ ( self : Tuple ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Union[str, Any] = self._prepare_for_class(_A , _A ) snake_case_ : List[str] = model(_A , noise=_A ) snake_case_ : Tuple = copy.deepcopy(self._prepare_for_class(_A , _A ) ) snake_case_ : str = model(**_A , noise=_A ) snake_case_ : Union[str, Any] = outputs_dict[0].numpy() snake_case_ : Optional[Any] = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 ) def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(_A : int ): snake_case_ : Any = {} for k, v in inputs_dict.items(): if tf.is_tensor(_A ): snake_case_ : str = v.numpy() else: snake_case_ : Optional[Any] = np.array(_A ) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ : int = model_class(_A ) snake_case_ : List[Any] = self._prepare_for_class(_A , _A ) snake_case_ : Any = prepare_numpy_arrays(_A ) snake_case_ : List[Any] = model(_A , noise=_A ) snake_case_ : List[Any] = model(**_A , noise=_A ) self.assert_outputs_same(_A , _A ) def UpperCAmelCase_ ( self : Tuple , _A : Union[str, Any] , _A : Union[str, Any] , _A : List[Any] ) -> List[str]: """simple docstring""" np.random.seed(2 ) snake_case_ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) snake_case_ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.constant(_A ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ : Optional[Any] = tf_noise super().check_pt_tf_models(_A , _A , _A ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(_A ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(_A , _A ),) if isinstance(_A , _A ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(_A , '_keras_serializable' , _A ) } snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) snake_case_ : Optional[int] = tf.convert_to_tensor(_A ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: snake_case_ : Optional[Any] = main_layer_class(_A ) snake_case_ : List[str] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } snake_case_ : Union[str, Any] = tf.keras.Model(_A , outputs=main_layer(_A ) ) snake_case_ : int = model(_A ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ : List[Any] = os.path.join(_A , 'keras_model.h5' ) model.save(_A ) snake_case_ : str = tf.keras.models.load_model( _A , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(_A , tf.keras.Model ) snake_case_ : List[str] = model(_A ) self.assert_outputs_same(_A , _A ) @slow def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : int = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : Optional[Any] = model_class(_A ) snake_case_ : Optional[Any] = self._prepare_for_class(_A , _A ) snake_case_ : int = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Any = outputs.last_hidden_state.numpy() snake_case_ : Optional[int] = 0 else: snake_case_ : str = outputs.logits.numpy() snake_case_ : Optional[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A , saved_model=_A ) snake_case_ : Any = model_class.from_pretrained(_A ) snake_case_ : Any = model(_A , noise=_A ) if model_class.__name__ == "TFViTMAEModel": snake_case_ : Dict = after_outputs['last_hidden_state'].numpy() snake_case_ : Dict = 0 else: snake_case_ : Any = after_outputs['logits'].numpy() snake_case_ : Optional[Any] = 0 snake_case_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(_A , 1E-5 ) def UpperCAmelCase_ ( self : Any ) -> str: """simple docstring""" np.random.seed(2 ) snake_case_ ,snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 ) snake_case_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: snake_case_ : str = model_class(_A ) snake_case_ : int = self._prepare_for_class(_A , _A ) snake_case_ : str = model(_A , noise=_A ) snake_case_ : Dict = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(_A ) snake_case_ : Any = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config snake_case_ : str = model_class.from_config(model.config ) snake_case_ : Union[str, Any] = new_model(_A ) # Build model new_model.set_weights(model.get_weights() ) snake_case_ : List[str] = new_model(_A , noise=_A ) self.assert_outputs_same(_A , _A ) @unittest.skip( 
reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: """simple docstring""" pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @slow def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(_A ) def SCREAMING_SNAKE_CASE__ ( ): snake_case_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @cached_property def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def UpperCAmelCase_ ( self : str ) -> Dict: """simple docstring""" np.random.seed(2 ) snake_case_ : List[str] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Dict = prepare_img() snake_case_ : Optional[Any] = image_processor(images=_A , return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) snake_case_ : int = ViTMAEConfig() snake_case_ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) snake_case_ : List[Any] = np.random.uniform(size=(1, num_patches) ) # forward pass snake_case_ : Optional[Any] = model(**_A , noise=_A ) # verify the logits snake_case_ : Optional[int] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , _A ) snake_case_ : Any = tf.convert_to_tensor( [[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , _A , atol=1E-4 )
style_context_codestyle: 327
label: 0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
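For context, a small sketch (not part of the row above) of what the _LazyModule indirection buys: importing the package only registers the import map, and each submodule is loaded on first attribute access.

import transformers.models.gpt_neo as gpt_neo  # cheap: only the import map is registered

config_cls = gpt_neo.GPTNeoConfig  # first access imports configuration_gpt_neo
model_cls = gpt_neo.GPTNeoModel    # first access imports modeling_gpt_neo (and torch)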
code_codestyle: 225
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Depth-first search over the state space tree; a branch is pruned when the
    # running sum overshoots max_sum or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
style_context_codestyle: 327
label: 0
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class snake_case ( snake_case_ , unittest.TestCase ): '''simple docstring''' A_ : Tuple = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def _SCREAMING_SNAKE_CASE ( self : Any, _lowerCamelCase : Union[str, Any]=0 ): '''simple docstring''' __A = floats_tensor((1, 3, 1_28, 1_28), rng=random.Random(_A ) ) __A = np.random.RandomState(_A ) __A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=_A ) __A = self.get_dummy_inputs() __A = pipe(**_A ).images __A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 1_28, 1_28, 3) __A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' ) __A = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=_A ) pipe.set_progress_bar_config(disable=_A ) __A = self.get_dummy_inputs() __A = pipe(**_A ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' ) __A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) # warmup pass to apply optimizations __A = pipe(**self.get_dummy_inputs() ) __A = self.get_dummy_inputs() __A = pipe(**_A ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ): '''simple docstring''' __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' ) __A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) __A = self.get_dummy_inputs() __A = pipe(**_A ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def 
_SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' ) __A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) __A = self.get_dummy_inputs() __A = pipe(**_A ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Tuple ): '''simple docstring''' __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' ) __A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_A ) __A = self.get_dummy_inputs() __A = pipe(**_A ).images __A = image[0, -3:, -3:, -1] assert image.shape == (1, 1_28, 1_28, 3) __A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class snake_case ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Dict ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ): '''simple docstring''' __A = ort.SessionOptions() __A = False return options def _SCREAMING_SNAKE_CASE ( self : List[Any] ): '''simple docstring''' __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __A = init_image.resize((7_68, 5_12) ) # using the PNDM scheduler by default __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', revision='''onnx''', safety_checker=_A, feature_extractor=_A, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=_A ) __A = 'A fantasy landscape, trending on artstation' __A = np.random.RandomState(0 ) __A = pipe( prompt=_A, image=_A, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=_A, output_type='''np''', ) __A = output.images __A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) __A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ): '''simple docstring''' __A = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) __A = init_image.resize((7_68, 5_12) ) __A = LMSDiscreteScheduler.from_pretrained( '''runwayml/stable-diffusion-v1-5''', subfolder='''scheduler''', revision='''onnx''' ) __A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''', revision='''onnx''', scheduler=_A, safety_checker=_A, feature_extractor=_A, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=_A ) __A = 'A fantasy landscape, trending on artstation' __A = np.random.RandomState(0 ) __A = 
pipe( prompt=_A, image=_A, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=_A, output_type='''np''', ) __A = output.images __A = images[0, 2_55:2_58, 3_83:3_86, -1] assert images.shape == (1, 5_12, 7_68, 3) __A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
code_codestyle: 266
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    # Newton-Laplace formula: c = sqrt(K / rho), with bulk modulus K in Pa
    # and density rho in kg/m^3; the result is in m/s.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
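A quick sanity check with assumed rough values for water (not part of the original sample): density ≈ 998 kg/m³ and bulk modulus ≈ 2.15 GPa give close to the textbook ~1.5 km/s speed of sound in water.

print(speed_of_sound_in_a_fluid(density=998.0, bulk_modulus=2.15e9))  # ≈ 1467.8 m/s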
style_context_codestyle: 327
label: 0
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) __lowerCAmelCase : Dict = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) a__ = field( default=snake_case_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ = field( default=snake_case_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) a__ = field( default=snake_case_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) a__ = field(default=snake_case_ , metadata={"""help""": """Whether tp freeze the encoder."""} ) a__ = field(default=snake_case_ , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class UpperCAmelCase_ : '''simple docstring''' a__ = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) a__ = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) a__ = field( default=10_24 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ = field( default=1_28 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ = field( default=1_42 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) a__ = field( default=1_42 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) a__ = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) a__ = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) a__ = field(default=-1 , metadata={"""help""": """# test examples. 
-1 means use all."""} ) a__ = field(default=snake_case_ , metadata={"""help""": """Source language id for translation."""} ) a__ = field(default=snake_case_ , metadata={"""help""": """Target language id for translation."""} ) a__ = field(default=snake_case_ , metadata={"""help""": """# num_beams to use for evaluation."""} ) a__ = field( default=snake_case_ , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def a__ ( A_, A_, A_ ): '''simple docstring''' logger.info(f'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(f''' {key} = {metrics[key]}''' ) save_json(__a, os.path.join(__a, f'''{split}_results.json''' ) ) def a__ ( ): '''simple docstring''' __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __magic_name__ = parser.parse_args_into_dataclasses() check_output_dir(__a ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fpaa, ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""", __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__magic_name__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) __magic_name__ = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(__a, __a, __a ): assert hasattr(__a, __a ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(__a, __a, getattr(__a, __a ) ) __magic_name__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) __magic_name__ = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path, from_tf=""".ckpt""" in model_args.model_name_or_path, config=__a, cache_dir=model_args.cache_dir, ) # use task specific params use_task_specific_params(__a, data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: __magic_name__ = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__a, (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__a, __a ): __magic_name__ = tokenizer.lang_code_to_id[data_args.tgt_lang] else: __magic_name__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) __magic_name__ = SeqaSeqDataset # Get datasets __magic_name__ = ( dataset_class( __a, type_path="""train""", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or """""", ) if training_args.do_train else None ) __magic_name__ = ( dataset_class( __a, type_path="""val""", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or """""", ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) __magic_name__ = ( dataset_class( __a, type_path="""test""", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or """""", ) if training_args.do_predict else None ) # Initialize our Trainer __magic_name__ = ( build_compute_metrics_fn(data_args.task, __a ) if training_args.predict_with_generate else None ) __magic_name__ = SeqaSeqTrainer( model=__a, args=__a, data_args=__a, train_dataset=__a, eval_dataset=__a, data_collator=SeqaSeqDataCollator( __a, __a, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=__a, tokenizer=__a, ) __magic_name__ = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) __magic_name__ = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) __magic_name__ = train_result.metrics __magic_name__ = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""", __a, training_args.output_dir ) all_metrics.update(__a ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model 
trainer.state.save_to_json(os.path.join(training_args.output_dir, """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) __magic_name__ = trainer.evaluate(metric_key_prefix="""val""" ) __magic_name__ = data_args.n_val __magic_name__ = round(metrics["""val_loss"""], 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""", __a, training_args.output_dir ) all_metrics.update(__a ) if training_args.do_predict: logger.info("""*** Predict ***""" ) __magic_name__ = trainer.predict(test_dataset=__a, metric_key_prefix="""test""" ) __magic_name__ = test_output.metrics __magic_name__ = data_args.n_test if trainer.is_world_process_zero(): __magic_name__ = round(metrics["""test_loss"""], 4 ) handle_metrics("""test""", __a, training_args.output_dir ) all_metrics.update(__a ) if training_args.predict_with_generate: __magic_name__ = tokenizer.batch_decode( test_output.predictions, skip_special_tokens=__a, clean_up_tokenization_spaces=__a ) __magic_name__ = lmap(str.strip, __a ) write_txt_file(__a, os.path.join(training_args.output_dir, """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(__a, os.path.join(training_args.output_dir, """all_results.json""" ) ) return all_metrics def a__ ( A_ ): '''simple docstring''' main() if __name__ == "__main__": main()
code_codestyle: 88
from math import pi


def arc_length(angle: float, radius: float) -> float:
    # Fraction of the full circumference 2*pi*r subtended by `angle` degrees.
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
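For the sample call, arc_length(90, 10) = 2π · 10 · (90 / 360) = 5π ≈ 15.7079632679, i.e. a quarter of the circumference of a circle with radius 10.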
style_context_codestyle: 327
label: 0
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): if number > 0: raise ValueError('''input must be a negative integer''' ) __lowercase : Dict = len(bin(__a )[3:] ) __lowercase : Union[str, Any] = bin(abs(__a ) - (1 << binary_number_length) )[3:] __lowercase : List[Any] = ( ( '1' + '0' * (binary_number_length - len(__a )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 249
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE_ ( snake_case_ ): __magic_name__: Optional[Any] = ["pixel_values"] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PIL.Image.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : str , ) -> None: """simple docstring""" super().__init__(**_A ) snake_case_ : Dict = size if size is not None else {'height': 256, 'width': 256} snake_case_ : Tuple = get_size_dict(_A ) snake_case_ : str = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case_ : int = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Union[str, Any] = do_resize snake_case_ : str = size snake_case_ : List[str] = resample snake_case_ : List[Any] = do_center_crop snake_case_ : Dict = crop_size snake_case_ : Tuple = do_rescale snake_case_ : Optional[Any] = rescale_factor snake_case_ : Any = do_normalize snake_case_ : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase_ ( self : Optional[int] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PIL.Image.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : List[str] , ) -> np.ndarray: """simple docstring""" snake_case_ : Tuple = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" ) return resize( _A , size=(size['height'], size['width']) , resample=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ) -> np.ndarray: """simple docstring""" snake_case_ : Optional[int] = get_size_dict(_A ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(_A , size=(size['height'], size['width']) , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ) -> str: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : Any , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase_ ( self : List[str] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : Union[str, Any]=None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ) -> PIL.Image.Image: """simple docstring""" snake_case_ : int = do_resize if do_resize is not None else self.do_resize snake_case_ : str = resample if resample is not None else self.resample snake_case_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ : List[str] = do_rescale if do_rescale is not None else self.do_rescale snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ : List[str] = do_normalize if do_normalize is not None else self.do_normalize snake_case_ : Any = image_mean if image_mean is not None else self.image_mean snake_case_ : Dict = image_std if image_std is not None else self.image_std snake_case_ : int = size if size is not None else self.size snake_case_ : Optional[int] = get_size_dict(_A ) snake_case_ : int = crop_size if crop_size is not None else self.crop_size snake_case_ : Any = get_size_dict(_A , param_name='crop_size' ) snake_case_ : Optional[Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. snake_case_ : Optional[Any] = [to_numpy_array(_A ) for image in images] if do_resize: snake_case_ : Dict = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: snake_case_ : Optional[Any] = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: snake_case_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: snake_case_ : str = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] snake_case_ : Dict = [to_channel_dimension_format(_A , _A ) for image in images] snake_case_ : Tuple = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
style_context_codestyle: 327
label: 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) UpperCAmelCase__ : Any = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Tuple = ['BeitFeatureExtractor'] UpperCAmelCase__ : str = ['BeitImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : Tuple = [ 'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BeitForImageClassification', 'BeitForMaskedImageModeling', 'BeitForSemanticSegmentation', 'BeitModel', 'BeitPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase__ : str = [ 'FlaxBeitForImageClassification', 'FlaxBeitForMaskedImageModeling', 'FlaxBeitModel', 'FlaxBeitPreTrainedModel', ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys UpperCAmelCase__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
25
import sys

N = (
    """73167176531330624919225119674426574742355349194934"""
    """96983520312774506326239578318016984801869478851843"""
    """85861560789112949495459501737958331952853208805511"""
    """12540698747158523863050715693290963295227443043557"""
    """66896648950445244523161731856403098711121722383113"""
    """62229893423380308135336276614282806444486645238749"""
    """30358907296290491560440772390713810515859307960866"""
    """70172427121883998797908792274921901699720888093776"""
    """65727333001053367881220235421809751254540594752243"""
    """52584907711670556013604839586446706324415722155397"""
    """53697817977846174064955149290862569321978468622482"""
    """83972241375657056057490261407972968652414535100474"""
    """82166370484403199890008895243450658541227588666881"""
    """16427171479924442928230863465674813919123162824586"""
    """17866458359124566529476545682848912883142607690042"""
    """24219022671055626321111109370544217506941658960408"""
    """07198403850962455444362981230987879927244284909188"""
    """84580156166097919133875499200524063689912560717606"""
    """05886116467109405077541002256983155200055935729725"""
    """71636269561882670428252483600823257530420752963450"""
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the string ``n``."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
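A quick sanity check of the scan above (an added aside, not part of the original script): the Project Euler 8 statement notes that the four adjacent digits with the greatest product are 9, 9, 8, 9, so running the same window logic with a width of 4 should yield 5832.

def window_product(n: str = N, width: int = 4) -> int:
    # Same sliding-window scan as solution(), with a configurable width.
    best = 0
    for i in range(len(n) - width + 1):
        product = 1
        for digit in n[i : i + width]:
            product *= int(digit)
        best = max(best, product)
    return best


assert window_product(width=4) == 5832  # 9 * 9 * 8 * 9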
327
0
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
236
import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __magic_name__: Optional[str] = field( default=snake_case_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether tp freeze the encoder."} ) __magic_name__: bool = field(default=snake_case_ , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class SCREAMING_SNAKE_CASE_ : __magic_name__: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) __magic_name__: Optional[str] = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) __magic_name__: Optional[int] = field( default=1024 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) __magic_name__: Optional[int] = field( default=142 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) __magic_name__: Optional[int] = field(default=-1 , metadata={"help": "# test examples. 
-1 means use all."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Source language id for translation."} ) __magic_name__: Optional[str] = field(default=snake_case_ , metadata={"help": "Target language id for translation."} ) __magic_name__: Optional[int] = field(default=snake_case_ , metadata={"help": "# num_beams to use for evaluation."} ) __magic_name__: bool = field( default=snake_case_ , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(__a , os.path.join(__a , f"""{split}_results.json""" ) ) def SCREAMING_SNAKE_CASE__ ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ ,snake_case_ ,snake_case_ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ ,snake_case_ ,snake_case_ : List[str] = parser.parse_args_into_dataclasses() check_output_dir(__a ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info('Training/evaluation parameters %s' , __a ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
snake_case_ : Tuple = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout') for p in extra_model_params: if getattr(__a , __a , __a ): assert hasattr(__a , __a ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(__a , __a , getattr(__a , __a ) ) snake_case_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ : Any = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=__a , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(__a , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: snake_case_ : Any = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(__a , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(__a , __a ): snake_case_ : int = tokenizer.lang_code_to_id[data_args.tgt_lang] else: snake_case_ : int = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(__a ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) snake_case_ : List[Any] = SeqaSeqDataset # Get datasets snake_case_ : List[Any] = ( dataset_class( __a , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_train else None ) snake_case_ : List[str] = ( dataset_class( __a , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) snake_case_ : List[Any] = ( dataset_class( __a , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , ) if training_args.do_predict else None ) # Initialize our Trainer snake_case_ : Any = ( build_compute_metrics_fn(data_args.task , __a ) if training_args.predict_with_generate else None ) snake_case_ : List[str] = SeqaSeqTrainer( model=__a , args=__a , data_args=__a , train_dataset=__a , eval_dataset=__a , data_collator=SeqaSeqDataCollator( __a , __a , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__a , tokenizer=__a , ) snake_case_ : Optional[int] = {} # Training if training_args.do_train: logger.info('*** Train ***' ) snake_case_ : Any = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) snake_case_ : Tuple = train_result.metrics snake_case_ : List[str] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics('train' , __a , training_args.output_dir ) all_metrics.update(__a ) # Need to save 
the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ : List[Any] = trainer.evaluate(metric_key_prefix='val' ) snake_case_ : str = data_args.n_val snake_case_ : Union[str, Any] = round(metrics['val_loss'] , 4 ) if trainer.is_world_process_zero(): handle_metrics('val' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.do_predict: logger.info('*** Predict ***' ) snake_case_ : Dict = trainer.predict(test_dataset=__a , metric_key_prefix='test' ) snake_case_ : Union[str, Any] = test_output.metrics snake_case_ : int = data_args.n_test if trainer.is_world_process_zero(): snake_case_ : List[str] = round(metrics['test_loss'] , 4 ) handle_metrics('test' , __a , training_args.output_dir ) all_metrics.update(__a ) if training_args.predict_with_generate: snake_case_ : Any = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) snake_case_ : Any = lmap(str.strip , __a ) write_txt_file(__a , os.path.join(training_args.output_dir , 'test_generations.txt' ) ) if trainer.is_world_process_zero(): save_json(__a , os.path.join(training_args.output_dir , 'all_results.json' ) ) return all_metrics def SCREAMING_SNAKE_CASE__ ( __a ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
327
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class a ( snake_case_ ): """simple docstring""" lowerCamelCase :Dict = ["pixel_values"] def __init__( self , lowerCAmelCase_ = True , lowerCAmelCase_ = 1 / 2_55 , lowerCAmelCase_ = True , lowerCAmelCase_ = 8 , **lowerCAmelCase_ , ) -> None: super().__init__(**_A ) _A = do_rescale _A = rescale_factor _A = do_pad _A = pad_size def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , **lowerCAmelCase_ ) -> np.ndarray: return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None ) -> int: _A = get_image_size(_A ) _A = (old_height // size + 1) * size - old_height _A = (old_width // size + 1) * size - old_width return pad(_A , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=_A ) def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = ChannelDimension.FIRST , **lowerCAmelCase_ , ) -> Any: _A = do_rescale if do_rescale is not None else self.do_rescale _A = rescale_factor if rescale_factor is not None else self.rescale_factor _A = do_pad if do_pad is not None else self.do_pad _A = pad_size if pad_size is not None else self.pad_size _A = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. _A = [to_numpy_array(_A ) for image in images] if do_rescale: _A = [self.rescale(image=_A , scale=_A ) for image in images] if do_pad: _A = [self.pad(_A , size=_A ) for image in images] _A = [to_channel_dimension_format(_A , _A ) for image in images] _A = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
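One subtlety in the pad helper above: `(old // size + 1) * size - old` always pads by at least one pixel, so an edge that is already a multiple of the pad size still gains a full extra block. A tiny standalone check of that arithmetic (added for illustration; not part of the file):

def pad_amount(old: int, size: int) -> int:
    # Mirrors the pad computation in the processor above.
    return (old // size + 1) * size - old


assert pad_amount(250, 8) == 6  # 250 -> 256
assert pad_amount(253, 8) == 3  # 253 -> 256
assert pad_amount(256, 8) == 8  # already a multiple of 8: a full extra block is added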
180
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = { """configuration_poolformer""": [ """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PoolFormerConfig""", """PoolFormerOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["""PoolFormerFeatureExtractor"""] _SCREAMING_SNAKE_CASE = ["""PoolFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PoolFormerForImageClassification""", """PoolFormerModel""", """PoolFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
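This `__init__`, like the BEiT and Perceiver ones elsewhere in this set, routes everything through `_LazyModule` so that torch- and vision-heavy submodules are only imported on first attribute access. A simplified sketch of that mechanism (illustrative only; the real `_LazyModule` in `transformers.utils` handles more cases):

import importlib
import types


class MiniLazyModule(types.ModuleType):
    """Defer submodule imports until an exported name is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name back to the submodule that defines it.
        self._name_to_submodule = {
            exported: submodule for submodule, names in import_structure.items() for exported in names
        }

    def __getattr__(self, attr):
        submodule = self._name_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value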
327
0
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black __UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __UpperCAmelCase = ' \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) ) lowerCAmelCase_ :Any = self.diffusers_dir shutil.copy( os.path.join(_A , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , ) def __lowerCAmelCase ( self ) -> List[str]: lowerCAmelCase_ :List[str] = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def __lowerCAmelCase ( self , __A , __A , __A , __A=None ) -> Optional[int]: lowerCAmelCase_ :List[Any] = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowerCAmelCase_ :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowerCAmelCase_ :Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowerCAmelCase_ :Tuple = black.format_str(_A , mode=_A ) lowerCAmelCase_ :Union[str, Any] = os.path.join(self.diffusers_dir , """new_code.py""" ) with open(_A , """w""" , newline="""\n""" ) as f: f.write(_A ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_A ) with open(_A , """r""" ) as f: self.assertTrue(f.read() , _A ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :List[str] = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" ) self.assertEqual(_A , _A ) def __lowerCAmelCase ( self ) -> str: self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , ) # With no empty line at the end self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , _A , ) # Copy consistency with rename self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , _A ) , ) # Copy consistency with a really long name lowerCAmelCase_ :int = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with 
DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("""Bert""" , _A , _A ) , ) # Copy consistency with overwrite self.check_copy_consistency( """# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , _A , overwrite_result=re.sub("""DDPM""" , """Test""" , _A ) , )
84
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def UpperCAmelCase_ ( self : Dict ) -> List[Any]: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : Optional[Any] = AutoTokenizer.from_pretrained(_A ) snake_case_ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : List[Any] = tokenizer('This is me' , return_tensors='pt' ) snake_case_ : Any = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) snake_case_ : Optional[Any] = model.generate(**_A ) snake_case_ : int = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_A ) snake_case_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(_A ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) snake_case_ : Optional[Any] = model_reloaded.generate(**_A ) self.assertTrue(torch.allclose(_A , _A ) ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple: """simple docstring""" snake_case_ : Any = 'hf-internal-testing/tiny-random-t5' snake_case_ : int = AutoModelForSeqaSeqLM.from_pretrained(_A ) snake_case_ : Dict = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(_A ): model.save_pretrained(_A ) snake_case_ : Union[str, Any] = model.reverse_bettertransformer() model.save_pretrained(_A )
327
0
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _lowercase : Tuple =logging.getLogger(__name__) class snake_case__ (snake_case_ ): """simple docstring""" def __init__( self , __lowercase=-1 ) -> Dict: """simple docstring""" a__ : str = label_idx def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> List[InputExample]: """simple docstring""" if isinstance(_A , _A ): a__ : int = mode.value a__ : Union[str, Any] = os.path.join(_A , F'''{mode}.txt''' ) a__ : Dict = 1 a__ : Optional[int] = [] with open(_A , encoding="""utf-8""" ) as f: a__ : str = [] a__ : List[str] = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_A , labels=_A ) ) guid_index += 1 a__ : List[str] = [] a__ : List[Any] = [] else: a__ : List[str] = line.split(""" """ ) words.append(splits[0] ) if len(_A ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_A , labels=_A ) ) return examples def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> Dict: """simple docstring""" a__ : int = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(_A ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: a__ : Dict = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n' writer.write(_A ) else: logger.warning("""Maximum sequence length exceeded: No prediction for \'%s\'.""" , line.split()[0] ) def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[str]: """simple docstring""" if path: with open(_A , """r""" ) as f: a__ : Optional[int] = f.read().splitlines() if "O" not in labels: a__ : Dict = ['O'] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class snake_case__ (snake_case_ ): """simple docstring""" def __init__( self ) -> Tuple: """simple docstring""" super().__init__(label_idx=-2 ) def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> List[str]: """simple docstring""" if path: with open(_A , """r""" ) as f: a__ : Optional[Any] = f.read().splitlines() if "O" not in labels: a__ : Optional[Any] = ['O'] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class snake_case__ (snake_case_ ): """simple docstring""" def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> List[InputExample]: """simple docstring""" if isinstance(_A , _A ): a__ : Tuple = mode.value a__ : Tuple = os.path.join(_A , F'''{mode}.txt''' ) a__ : Optional[Any] = 1 a__ : List[str] = [] with open(_A , encoding="""utf-8""" ) as f: for sentence in parse_incr(_A ): a__ : int = [] a__ : Dict = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(_A ) == len(_A ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_A , labels=_A ) ) guid_index += 1 return examples def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase ) -> int: """simple docstring""" a__ : List[str] = 0 for sentence in parse_incr(_A ): 
s_p = preds_list[example_id] out = '' for token in sentence: out += F'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) ''' out += "\n" writer.write(out ) example_id += 1 def SCREAMING_SNAKE_CASE__( self , path ) -> List[str]: """simple docstring""" if path: with open(path , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
170
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE_ ( snake_case_ ): def __init__( self : Union[str, Any] , _A : Any , _A : Dict ) -> Union[str, Any]: """simple docstring""" snake_case_ : str = params snake_case_ : int = np.array(_A ) snake_case_ : Optional[int] = np.array([len(_A ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Tuple , _A : Optional[int] ) -> str: """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : List[str] ) -> str: """simple docstring""" return len(self.lengths ) def UpperCAmelCase_ ( self : Dict ) -> str: """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def UpperCAmelCase_ ( self : Any ) -> Optional[Any]: """simple docstring""" snake_case_ : Dict = self.params.max_model_input_size snake_case_ : Tuple = self.lengths > max_len logger.info(F"""Splitting {sum(_A )} too long sequences.""" ) def divide_chunks(_A : Union[str, Any] , _A : Dict ): return [l[i : i + n] for i in range(0 , len(_A ) , _A )] snake_case_ : Dict = [] snake_case_ : Union[str, Any] = [] if self.params.mlm: snake_case_ ,snake_case_ : Optional[int] = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: snake_case_ ,snake_case_ : Any = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: snake_case_ : List[Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: snake_case_ : Optional[int] = np.insert(_A , 0 , _A ) if sub_s[-1] != sep_id: snake_case_ : Optional[Any] = np.insert(_A , len(_A ) , _A ) assert len(_A ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(_A ) new_tok_ids.extend(_A ) new_lengths.extend([len(_A ) for l in sub_seqs] ) snake_case_ : Tuple = np.array(_A ) snake_case_ : int = np.array(_A ) def UpperCAmelCase_ ( self : List[str] ) -> List[str]: """simple docstring""" snake_case_ : Tuple = len(self ) snake_case_ : int = self.lengths > 11 snake_case_ : Dict = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : List[Any] = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: snake_case_ : Optional[Any] = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = len(self ) snake_case_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) snake_case_ : Any = (unk_occs / self.lengths) < 0.5 snake_case_ : List[Any] = self.token_ids[indices] snake_case_ : int = self.lengths[indices] snake_case_ : Tuple = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # 
logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def UpperCAmelCase_ ( self : Optional[int] , _A : Union[str, Any] ) -> List[Any]: """simple docstring""" snake_case_ : Any = [t[0] for t in batch] snake_case_ : int = [t[1] for t in batch] assert len(_A ) == len(_A ) # Max for paddings snake_case_ : str = max(_A ) # Pad token ids if self.params.mlm: snake_case_ : int = self.params.special_tok_ids['pad_token'] else: snake_case_ : Dict = self.params.special_tok_ids['unk_token'] snake_case_ : Dict = [list(t.astype(_A ) ) + [pad_idx] * (max_seq_len_ - len(_A )) for t in token_ids] assert len(tk_ ) == len(_A ) assert all(len(_A ) == max_seq_len_ for t in tk_ ) snake_case_ : Any = torch.tensor(tk_ ) # (bs, max_seq_len_) snake_case_ : Optional[Any] = torch.tensor(_A ) # (bs) return tk_t, lg_t
327
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _lowerCamelCase : Optional[int] = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : Tuple = ["""PerceiverFeatureExtractor"""] _lowerCamelCase : List[Any] = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCamelCase : str = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
14
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm: gcd(a, b) = gcd(b, a mod b)."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
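As a quick cross-check (an added aside, not in the original script), both variants should agree with the standard library on arbitrary non-negative inputs:

import math
import random

for _ in range(100):
    a, b = random.randint(0, 1000), random.randint(0, 1000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)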
327
0
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a_ = argparse.ArgumentParser( description=( """Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""]) parser.add_argument("""--model_name""", default="""roberta-large""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") a_ = parser.parse_args() if args.model_type == "roberta": a_ = RobertaForMaskedLM.from_pretrained(args.model_name) a_ = """roberta""" elif args.model_type == "gpt2": a_ = GPTaLMHeadModel.from_pretrained(args.model_name) a_ = """transformer""" a_ = model.state_dict() a_ = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a_ = state_dict[f"{prefix}.{param_name}"] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a_ = f"{prefix}.embeddings.{w}.weight" a_ = state_dict[param_name] for w in ["weight", "bias"]: a_ = f"{prefix}.embeddings.LayerNorm.{w}" a_ = state_dict[param_name] # Transformer Blocks # a_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a_ = state_dict[ f"{prefix}.h.{teacher_idx}.{layer}.{w}" ] a_ = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a_ = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}" ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a_ = state_dict[f"{layer}"] if args.vocab_transform: for w in ["weight", "bias"]: a_ = state_dict[f"lm_head.dense.{w}"] a_ = state_dict[f"lm_head.layer_norm.{w}"] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a_ = state_dict[f"{prefix}.ln_f.{w}"] a_ = state_dict["""lm_head.weight"""] print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
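The block above transfers six alternating teacher layers (0, 2, 4, 7, 9, 11) into a student checkpoint by rewriting state-dict keys. A stripped-down sketch of that renaming step (the key pattern mirrors the BERT-style branch of the script; the tensors are placeholders, not real weights):

import torch

teacher_sd = {f"encoder.layer.{i}.output.dense.weight": torch.zeros(1) for i in range(12)}

student_sd = {}
for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    # Teacher layer `teacher_idx` becomes student layer `std_idx`.
    student_sd[f"encoder.layer.{std_idx}.output.dense.weight"] = teacher_sd[
        f"encoder.layer.{teacher_idx}.output.dense.weight"
    ]

assert len(student_sd) == 6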
330
import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(""">=""", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _SCREAMING_SNAKE_CASE = get_logger(__name__) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : Dict = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Dict = os.path.join(__a , __a ) if accelerator.process_index == 0: logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Dict = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Dict = os.path.join(__a , __a ) logger.info(f"""Saving model to {output_model_file}""" ) torch.save(__a , __a ) logger.info(f"""Model saved to {output_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Optional[int] = os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving model to {ckpt_dir}""" ) snake_case_ : int = {'model': state_dict} dist_cp.save_state_dict( state_dict=__a , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Model saved to {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(__a ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return snake_case_ : Optional[int] = f"""{MODEL_NAME}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}.bin""" snake_case_ : Optional[Any] = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[Any] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ : Optional[Any] = ( f"""{MODEL_NAME}_rank{accelerator.process_index}.bin""" if model_index == 0 else f"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin""" ) snake_case_ : Tuple = os.path.join(__a , __a ) logger.info(f"""Loading model from {input_model_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Model loaded from {input_model_file}""" ) 
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ : Tuple = ( os.path.join(__a , f"""{MODEL_NAME}_{model_index}""" ) if f"""{MODEL_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading model from {ckpt_dir}""" ) snake_case_ : List[Any] = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=__a , storage_reader=dist_cp.FileSystemReader(__a ) , planner=DefaultLoadPlanner() , ) snake_case_ : Any = state_dict['model'] logger.info(f"""Model loaded from {ckpt_dir}""" ) model.load_state_dict(__a ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): os.makedirs(__a , exist_ok=__a ) with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ : List[str] = FSDP.optim_state_dict(__a , __a ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ : str = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : Any = os.path.join(__a , __a ) logger.info(f"""Saving Optimizer state to {output_optimizer_file}""" ) torch.save(__a , __a ) logger.info(f"""Optimizer state saved in {output_optimizer_file}""" ) else: snake_case_ : Optional[int] = os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) os.makedirs(__a , exist_ok=__a ) logger.info(f"""Saving Optimizer state to {ckpt_dir}""" ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(__a ) , planner=DefaultSavePlanner() , ) logger.info(f"""Optimizer state saved in {ckpt_dir}""" ) def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a , __a , __a=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( __a , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ : Optional[Any] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ : Union[str, Any] = ( f"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else f"""{OPTIMIZER_NAME}_{optimizer_index}.bin""" ) snake_case_ : List[Any] = os.path.join(__a , __a ) logger.info(f"""Loading Optimizer state from {input_optimizer_file}""" ) snake_case_ : Optional[int] = torch.load(__a ) logger.info(f"""Optimizer state loaded from {input_optimizer_file}""" ) else: snake_case_ : str = ( os.path.join(__a , f"""{OPTIMIZER_NAME}_{optimizer_index}""" ) if f"""{OPTIMIZER_NAME}""" not in input_dir else input_dir ) logger.info(f"""Loading Optimizer from {ckpt_dir}""" ) snake_case_ : Any = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(__a ) , ) snake_case_ : Optional[int] = optim_state['optimizer'] logger.info(f"""Optimizer loaded from {ckpt_dir}""" ) snake_case_ : Optional[Any] = FSDP.optim_state_dict_to_load(__a , __a , __a ) optimizer.load_state_dict(__a )
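For orientation, the FULL_STATE_DICT save path above reduces to the following pattern (a hedged sketch: the config values are illustrative, and a real run needs an initialized process group and an FSDP-wrapped model):

import torch
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig, StateDictType


def save_full_state_dict(model: FSDP, rank: int, path: str = "model.bin") -> None:
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)  # gather onto CPU on rank 0
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        state_dict = model.state_dict()
    if rank == 0:
        torch.save(state_dict, path)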
327
0
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCamelCase__ : int = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class lowerCamelCase_ ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str , _lowerCAmelCase : bool , _lowerCAmelCase : str = None , _lowerCAmelCase : list = None ): SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.join('examples' , 'by_feature' ) ) SCREAMING_SNAKE_CASE_ = os.path.abspath('examples' ) for item in os.listdir(_A ): if item not in EXCLUDE_EXAMPLES: SCREAMING_SNAKE_CASE_ = os.path.join(_A , _A ) if os.path.isfile(_A ) and ".py" in item_path: with self.subTest( tested_script=_A , feature_script=_A , tested_section='main()' if parser_only else 'training_function()' , ): SCREAMING_SNAKE_CASE_ = compare_against_test( os.path.join(_A , _A ) , _A , _A , _A ) SCREAMING_SNAKE_CASE_ = '\n'.join(_A ) if special_strings is not None: for string in special_strings: SCREAMING_SNAKE_CASE_ = diff.replace(_A , '' ) self.assertEqual(_A , '' ) def lowerCAmelCase_ ( self : Tuple ): self.one_complete_example('complete_nlp_example.py' , _A ) self.one_complete_example('complete_nlp_example.py' , _A ) def lowerCAmelCase_ ( self : str ): SCREAMING_SNAKE_CASE_ = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) ) SCREAMING_SNAKE_CASE_ = [ ' ' * 16 + '{\n\n', ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n', ' ' * 20 + '"f1": eval_metric["f1"],\n\n', ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n', ' ' * 20 + '"epoch": epoch,\n\n', ' ' * 16 + '},\n\n', ' ' * 16 + 'step=epoch,\n', ' ' * 12, ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n', ] self.one_complete_example('complete_cv_example.py' , _A , _A , _A ) self.one_complete_example('complete_cv_example.py' , _A , _A , _A ) @mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} ) class lowerCamelCase_ ( snake_case_ ): '''simple docstring''' lowercase_ = False @classmethod def lowerCAmelCase_ ( cls : Any ): super().setUpClass() SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ = os.path.join(cls._tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) SCREAMING_SNAKE_CASE_ = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def lowerCAmelCase_ ( cls : int ): super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowerCAmelCase_ ( self : List[str] ): SCREAMING_SNAKE_CASE_ = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) ) def lowerCAmelCase_ ( self : int ): SCREAMING_SNAKE_CASE_ = F"\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n ".split() 
SCREAMING_SNAKE_CASE_ = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) ) def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n ".split() SCREAMING_SNAKE_CASE_ = run_command(self._launch_args + testargs , return_stdout=_A ) self.assertNotIn('epoch 0:' , _A ) self.assertIn('epoch 1:' , _A ) def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = F"\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n ".split() SCREAMING_SNAKE_CASE_ = run_command(self._launch_args + testargs , return_stdout=_A ) if torch.cuda.is_available(): SCREAMING_SNAKE_CASE_ = torch.cuda.device_count() else: SCREAMING_SNAKE_CASE_ = 1 if num_processes > 1: self.assertNotIn('epoch 0:' , _A ) self.assertIn('epoch 1:' , _A ) else: self.assertIn('epoch 0:' , _A ) self.assertIn('epoch 1:' , _A ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split() with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ): SCREAMING_SNAKE_CASE_ = run_command(self._launch_args + testargs , return_stdout=_A ) SCREAMING_SNAKE_CASE_ = re.findall('({.+})' , _A ) SCREAMING_SNAKE_CASE_ = [r for r in results if 'accuracy' in r][-1] SCREAMING_SNAKE_CASE_ = ast.literal_eval(_A ) self.assertGreaterEqual(results['accuracy'] , 0.75 ) def lowerCAmelCase_ ( self : Any ): SCREAMING_SNAKE_CASE_ = ['examples/by_feature/multi_process_metrics.py'] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def lowerCAmelCase_ ( self : List[str] ): with tempfile.TemporaryDirectory() as tmpdir: SCREAMING_SNAKE_CASE_ = F"\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n ".split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_A , 'tracking' ) ) ) def lowerCAmelCase_ ( self : Optional[int] ): SCREAMING_SNAKE_CASE_ = ['examples/by_feature/gradient_accumulation.py'] run_command(self._launch_args + testargs ) def lowerCAmelCase_ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE_ = ['examples/by_feature/local_sgd.py'] run_command(self._launch_args + testargs )
225
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : int = 32 , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : bool = True , _A : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , _A : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , _A : bool = True , _A : Tuple=7 , _A : Tuple=30 , _A : int=400 , _A : Tuple=3 , ) -> Optional[int]: """simple docstring""" snake_case_ : str = parent snake_case_ : str = do_resize snake_case_ : str = size if size is not None else {'shortest_edge': 288} snake_case_ : Any = size_divisor snake_case_ : Any = do_rescale snake_case_ : Union[str, Any] = rescale_factor snake_case_ : str = do_normalize snake_case_ : int = do_center_crop snake_case_ : str = image_mean snake_case_ : int = image_std snake_case_ : Any = do_pad snake_case_ : Optional[int] = batch_size snake_case_ : List[str] = num_channels snake_case_ : Any = min_resolution snake_case_ : str = max_resolution def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def UpperCAmelCase_ ( self : Dict , _A : str , _A : Union[str, Any]=False ) -> int: """simple docstring""" if not batched: snake_case_ : Optional[int] = self.size['shortest_edge'] snake_case_ : List[Any] = image_inputs[0] if isinstance(_A , Image.Image ): snake_case_ ,snake_case_ : Optional[Any] = image.size else: snake_case_ ,snake_case_ : str = image.shape[1], image.shape[2] snake_case_ : Dict = size / min(_A , _A ) if h < w: snake_case_ ,snake_case_ : str = size, scale * w else: snake_case_ ,snake_case_ : Tuple = scale * h, size snake_case_ : Dict = int((1333 / 800) * size ) if max(_A , _A ) > max_size: snake_case_ : Union[str, Any] = max_size / max(_A , _A ) snake_case_ : Any = newh * scale snake_case_ : Union[str, Any] = neww * scale snake_case_ ,snake_case_ : Any = int(newh + 0.5 ), int(neww + 0.5 ) snake_case_ ,snake_case_ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: snake_case_ : Optional[int] = [] for image in image_inputs: snake_case_ ,snake_case_ : Optional[int] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case_ : str = max(_A , key=lambda _A : item[0] )[0] snake_case_ : List[str] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE_ ( snake_case_ , unittest.TestCase ): __magic_name__: List[Any] = BridgeTowerImageProcessor if is_vision_available() else None def UpperCAmelCase_ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ : int = BridgeTowerImageProcessingTester(self ) @property def UpperCAmelCase_ ( self : int ) -> Any: """simple 
docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase_ ( self : Union[str, Any] ) -> Dict: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , 'image_mean' ) ) self.assertTrue(hasattr(_A , 'image_std' ) ) self.assertTrue(hasattr(_A , 'do_normalize' ) ) self.assertTrue(hasattr(_A , 'do_resize' ) ) self.assertTrue(hasattr(_A , 'size' ) ) self.assertTrue(hasattr(_A , 'size_divisor' ) ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self : Tuple ) -> Tuple: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case_ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input snake_case_ : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : List[str] = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any: """simple docstring""" snake_case_ : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Tuple = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : Any = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input snake_case_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : Optional[Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case_ : str = image_processing(_A , return_tensors='pt' ).pixel_values snake_case_ ,snake_case_ : 
Tuple = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
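# Illustrative usage sketch for the image processor exercised by the tests above.
# The checkpoint name is an assumption; any BridgeTower checkpoint that ships a
# preprocessor config should behave the same way.
if __name__ == "__main__":
    from PIL import Image
    from transformers import BridgeTowerImageProcessor

    processor = BridgeTowerImageProcessor.from_pretrained("BridgeTower/bridgetower-base")
    image = Image.new("RGB", (640, 480))  # stand-in for a real photo
    inputs = processor(image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # (1, 3, H, W); the short side follows `shortest_edge`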
"""simple docstring""" import re from filelock import FileLock try: import nltk lowercase_ = True except (ImportError, ModuleNotFoundError): lowercase_ = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__a ) )
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)

        times["map identity batched"] = map(dataset, batched=True)

        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)

        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case_ ): '''simple docstring''' a__ = ["pixel_values"] def __init__( self : Dict , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Tuple , ) -> None: """simple docstring""" super().__init__(**_A ) __magic_name__ = size if size is not None else {'shortest_edge': 256} __magic_name__ = get_size_dict(_A , default_to_square=_A ) __magic_name__ = crop_size if crop_size is not None else {'height': 224, 'width': 224} __magic_name__ = get_size_dict(_A ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(_A , size=size["""shortest_edge"""] , default_to_square=_A ) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : int , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(_A ) return center_crop(_A , size=(size["""height"""], size["""width"""]) , data_format=_A , **_A ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def _lowercase ( self : Any , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> int: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(_A , default_to_square=_A ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(_A ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
__magic_name__ = [to_numpy_array(_A ) for image in images] if do_resize: __magic_name__ = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=_A , size=_A ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __magic_name__ = [to_channel_dimension_format(_A , _A ) for image in images] __magic_name__ = {'pixel_values': images} return BatchFeature(data=_A , tensor_type=_A )
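# Illustrative sketch of how an image processor like the one defined above is
# typically driven; "google/mobilenet_v2_1.0_224" is only an example checkpoint,
# not a claim about which model this particular file belongs to.
if __name__ == "__main__":
    import numpy as np
    from transformers import AutoImageProcessor

    image_processor = AutoImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
    image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # HWC uint8 stand-in
    batch = image_processor(image, return_tensors="pt")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224) after resize + center crop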
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
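# Usage sketch: the namedtuple gives both positional and attribute access to the
# three scraped counters (values below are placeholders, not real figures).
if __name__ == "__main__":
    stats = covid_data(cases="1,000", deaths="10", recovered="900")
    print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")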
"""simple docstring""" import numpy as np import qiskit def __UpperCAmelCase ( __UpperCamelCase = 8 , __UpperCamelCase = None ): __lowercase : str = np.random.default_rng(seed=__a ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. __lowercase : Tuple = 6 * key_len # Measurement basis for Alice's qubits. __lowercase : Optional[Any] = rng.integers(2 , size=__a ) # The set of states Alice will prepare. __lowercase : str = rng.integers(2 , size=__a ) # Measurement basis for Bob's qubits. __lowercase : str = rng.integers(2 , size=__a ) # Quantum Circuit to simulate BB84 __lowercase : str = qiskit.QuantumCircuit(__a , name='''BB84''' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(__a ): if alice_state[index] == 1: bbaa_circ.x(__a ) if alice_basis[index] == 1: bbaa_circ.h(__a ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(__a ): if bob_basis[index] == 1: bbaa_circ.h(__a ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. __lowercase : Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. __lowercase : List[str] = qiskit.execute(__a , __a , shots=1 , seed_simulator=__a ) # Returns the result of measurement. __lowercase : Union[str, Any] = job.result().get_counts(__a ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. __lowercase : int = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( __a , __a , __a ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. __lowercase : Dict = gen_key[:key_len] if len(__a ) >= key_len else gen_key.ljust(__a , '''0''' ) return key if __name__ == "__main__": print(F"The generated key is : {bbaa(8, seed=0)}") from doctest import testmod testmod()
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer


VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "unc-nlp/lxmert-base-uncased": (
            "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "unc-nlp/lxmert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}


class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
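# Illustrative sketch: the fast tokenizer builds BERT-style inputs with [CLS]/[SEP]
# and segment ids, exactly as the two helper methods above describe.
if __name__ == "__main__":
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    encoded = tokenizer("a picture of a cat", "what animal is shown?")
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second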
"""simple docstring""" from __future__ import annotations class lowerCAmelCase_ : """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE__ ) -> None: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = data SCREAMING_SNAKE_CASE__ : Node | None = None SCREAMING_SNAKE_CASE__ : Node | None = None def lowercase_ ( _snake_case ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def lowercase_ ( _snake_case ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def lowercase_ ( _snake_case ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def lowercase_ ( ): # Main function for testing. SCREAMING_SNAKE_CASE__ : str = Node(1 ) SCREAMING_SNAKE_CASE__ : Dict = Node(2 ) SCREAMING_SNAKE_CASE__ : str = Node(3 ) SCREAMING_SNAKE_CASE__ : Any = Node(4 ) SCREAMING_SNAKE_CASE__ : List[Any] = Node(5 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = Node(6 ) SCREAMING_SNAKE_CASE__ : Tuple = Node(7 ) SCREAMING_SNAKE_CASE__ : int = Node(8 ) SCREAMING_SNAKE_CASE__ : Any = Node(9 ) print(is_full_binary_tree(__a ) ) print(depth_of_tree(__a ) ) print("""Tree is: """ ) display(__a ) if __name__ == "__main__": main()
def is_automorphic_number(number: int) -> bool:
    """Return True if the square of `number` ends in `number` itself
    (e.g. 5 -> 25, 76 -> 5776)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and its square, one digit at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
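# A few worked cases: 5**2 = 25 and 76**2 = 5776 end in the number itself,
# while 7**2 = 49 does not.
if __name__ == "__main__":
    for candidate in (0, 1, 5, 7, 25, 76, 376):
        print(candidate, is_automorphic_number(candidate))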
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""
    return vector * sigmoid(1.702 * vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
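# Quick numeric check: the approximation keeps large positive inputs nearly
# unchanged, damps negative ones toward zero, and behaves like 0.5 * x near 0.
if __name__ == "__main__":
    xs = np.array([-3.0, -1.0, 0.0, 1.0, 3.0])
    print(sigmoid(xs))
    print(gaussian_error_linear_unit(xs))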
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # General docstring _SCREAMING_SNAKE_CASE = 'MobileNetV1Config' # Base docstring _SCREAMING_SNAKE_CASE = 'google/mobilenet_v1_1.0_224' _SCREAMING_SNAKE_CASE = [1, 1_024, 7, 7] # Image classification docstring _SCREAMING_SNAKE_CASE = 'google/mobilenet_v1_1.0_224' _SCREAMING_SNAKE_CASE = 'tabby, tabby cat' _SCREAMING_SNAKE_CASE = [ 'google/mobilenet_v1_1.0_224', 'google/mobilenet_v1_0.75_192', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def snake_case ( snake_case__ :Tuple , snake_case__ :Tuple , snake_case__ :Any=None) -> Optional[Any]: _A = {} if isinstance(__a , __a): _A = model.mobilenet_va else: _A = model _A = 'MobilenetV1/Conv2d_0/' _A = backbone.conv_stem.convolution.weight _A = backbone.conv_stem.normalization.bias _A = backbone.conv_stem.normalization.weight _A = backbone.conv_stem.normalization.running_mean _A = backbone.conv_stem.normalization.running_var for i in range(13): _A = i + 1 _A = i * 2 _A = backbone.layer[pt_index] _A = F'''MobilenetV1/Conv2d_{tf_index}_depthwise/''' _A = pointer.convolution.weight _A = pointer.normalization.bias _A = pointer.normalization.weight _A = pointer.normalization.running_mean _A = pointer.normalization.running_var _A = backbone.layer[pt_index + 1] _A = F'''MobilenetV1/Conv2d_{tf_index}_pointwise/''' _A = pointer.convolution.weight _A = pointer.normalization.bias _A = pointer.normalization.weight _A = pointer.normalization.running_mean _A = pointer.normalization.running_var if isinstance(__a , __a): _A = 'MobilenetV1/Logits/Conv2d_1c_1x1/' _A = model.classifier.weight _A = model.classifier.bias return tf_to_pt_map def snake_case ( snake_case__ :Tuple , snake_case__ :Optional[int] , snake_case__ :Optional[Any]) -> Optional[int]: try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""") raise # Load weights from TF model _A = tf.train.list_variables(__a) _A = {} for name, shape in init_vars: logger.info(F'''Loading TF weight {name} with shape {shape}''') _A = tf.train.load_variable(__a , __a) _A = array # Build TF to PyTorch weights loading map _A = _build_tf_to_pytorch_map(__a , __a , __a) for name, pointer in tf_to_pt_map.items(): logger.info(F'''Importing {name}''') if name not in tf_weights: logger.info(F'''{name} not in tf pre-trained weights, skipping''') continue _A = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""") _A = np.transpose(__a , (2, 3, 0, 1)) elif "weights" in name: logger.info("""Transposing""") if len(pointer.shape) == 2: # copying into linear layer _A = array.squeeze().transpose() else: _A = np.transpose(__a , (3, 2, 0, 1)) if pointer.shape != array.shape: raise ValueError(F'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''') logger.info(F'''Initialize PyTorch weight {name} {array.shape}''') _A = torch.from_numpy(__a) tf_weights.pop(__a , __a) tf_weights.pop(name + """/RMSProp""" , __a) tf_weights.pop(name + """/RMSProp_1""" , __a) tf_weights.pop(name + """/ExponentialMovingAverage""" , __a) logger.info(F'''Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}''') return model def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Dict) -> int: _A = features.shape[-2:] _A = conv_layer.stride _A = conv_layer.kernel_size if in_height % stride_height == 0: _A = max(kernel_height - stride_height , 0) else: _A = max(kernel_height - (in_height % stride_height) , 0) if in_width % stride_width == 0: _A = max(kernel_width - stride_width , 0) else: _A = max(kernel_width - (in_width % stride_width) , 0) _A = pad_along_width // 2 _A = pad_along_width - pad_left _A = pad_along_height // 2 _A = pad_along_height - pad_top _A = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(__a , __a , """constant""" , 0.0) class a ( nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = True , ) -> None: super().__init__() _A = config if in_channels % groups != 0: raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' ) if out_channels % groups != 0: raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' ) _A = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) _A = nn.Convad( in_channels=_A , out_channels=_A , kernel_size=_A , stride=_A , padding=_A , groups=_A , bias=_A , padding_mode="""zeros""" , ) if use_normalization: _A = nn.BatchNormad( num_features=_A , eps=config.layer_norm_eps , momentum=0.9997 , affine=_A , track_running_stats=_A , ) else: _A = None if use_activation: if isinstance(_A , _A ): _A = ACTaFN[use_activation] elif isinstance(config.hidden_act , _A ): _A = ACTaFN[config.hidden_act] else: _A = config.hidden_act else: _A = None def UpperCAmelCase ( self , lowerCAmelCase_ ) -> torch.Tensor: if self.config.tf_padding: _A = apply_tf_padding(_A , self.convolution ) _A = self.convolution(_A ) if self.normalization is not None: _A = self.normalization(_A ) if self.activation is not None: _A = self.activation(_A ) return features class a ( snake_case_ ): """simple docstring""" lowerCamelCase :Tuple = 
MobileNetVaConfig lowerCamelCase :Union[str, Any] = load_tf_weights_in_mobilenet_va lowerCamelCase :int = "mobilenet_v1" lowerCamelCase :int = "pixel_values" lowerCamelCase :int = False def UpperCAmelCase ( self , lowerCAmelCase_ ) -> None: if isinstance(_A , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(_A , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) _SCREAMING_SNAKE_CASE = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _SCREAMING_SNAKE_CASE = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , snake_case_ , ) class a ( snake_case_ ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> str: super().__init__(_A ) _A = config _A = 32 _A = max(int(depth * config.depth_multiplier ) , config.min_depth ) _A = MobileNetVaConvLayer( _A , in_channels=config.num_channels , out_channels=_A , kernel_size=3 , stride=2 , ) _A = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] _A = nn.ModuleList() for i in range(13 ): _A = out_channels if strides[i] == 2 or i == 0: depth *= 2 _A = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( _A , in_channels=_A , out_channels=_A , kernel_size=3 , stride=strides[i] , groups=_A , ) ) self.layer.append( MobileNetVaConvLayer( _A , in_channels=_A , out_channels=_A , kernel_size=1 , ) ) _A = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def UpperCAmelCase ( self , lowerCAmelCase_ ) -> int: raise NotImplementedError @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def UpperCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: _A = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _A = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) _A = self.conv_stem(_A ) _A = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): _A = 
layer_module(_A ) if output_hidden_states: _A = all_hidden_states + (hidden_states,) _A = hidden_states if self.pooler is not None: _A = torch.flatten(self.pooler(_A ) , start_dim=1 ) else: _A = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_A , pooler_output=_A , hidden_states=_A , ) @add_start_docstrings( '''\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ''' , snake_case_ , ) class a ( snake_case_ ): """simple docstring""" def __init__( self , lowerCAmelCase_ ) -> None: super().__init__(_A ) _A = config.num_labels _A = MobileNetVaModel(_A ) _A = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head _A = nn.Dropout(config.classifier_dropout_prob , inplace=_A ) _A = nn.Linear(_A , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_A ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def UpperCAmelCase ( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: _A = return_dict if return_dict is not None else self.config.use_return_dict _A = self.mobilenet_va(_A , output_hidden_states=_A , return_dict=_A ) _A = outputs.pooler_output if return_dict else outputs[1] _A = self.classifier(self.dropout(_A ) ) _A = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _A = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _A = 'single_label_classification' else: _A = 'multi_label_classification' if self.config.problem_type == "regression": _A = MSELoss() if self.num_labels == 1: _A = loss_fct(logits.squeeze() , labels.squeeze() ) else: _A = loss_fct(_A , _A ) elif self.config.problem_type == "single_label_classification": _A = CrossEntropyLoss() _A = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": _A = BCEWithLogitsLoss() _A = loss_fct(_A , _A ) if not return_dict: _A = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=_A , logits=_A , hidden_states=outputs.hidden_states , )
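# Illustrative inference sketch for the classification head defined above; the
# checkpoint name matches the docstring constants, but its availability is assumed.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

    processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    image = Image.new("RGB", (224, 224))  # stand-in for a real photo
    outputs = model(**processor(image, return_tensors="pt"))
    print(model.config.id2label[outputs.logits.argmax(-1).item()])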
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the hidden states of the base model for a given text."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, typically the last hidden state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
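# Illustrative sketch: the public entry point for this class is the `pipeline`
# factory; with the default settings it returns nested lists of floats.
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Hello world", return_tensors=False)
    print(len(features[0]), len(features[0][0]))  # tokens x hidden_size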
import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow lowercase__ : Tuple = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_mlm_flax import run_qa import run_summarization_flax import run_ta_mlm_flax logging.basicConfig(level=logging.DEBUG) lowercase__ : List[Any] = logging.getLogger() def A_ ( ) -> List[Any]: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) __UpperCamelCase = parser.parse_args() return args.f def A_ ( snake_case : Tuple , snake_case : List[str]="eval" ) -> Optional[int]: '''simple docstring''' __UpperCamelCase = os.path.join(snake_case , f"{split}_results.json" ) if os.path.exists(snake_case ): with open(snake_case , '''r''' ) as f: return json.load(snake_case ) raise ValueError(f"can't find {path}" ) lowercase__ : List[Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_flax_glue.main() __UpperCamelCase = get_results(SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) @slow def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_clm_flax.main() __UpperCamelCase = get_results(SCREAMING_SNAKE_CASE_ ) self.assertLess(result['''eval_perplexity'''] , 100 ) @slow def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_summarization_flax.main() __UpperCamelCase = 
get_results(SCREAMING_SNAKE_CASE_ , split='''test''' ) self.assertGreaterEqual(result['''test_rouge1'''] , 10 ) self.assertGreaterEqual(result['''test_rouge2'''] , 2 ) self.assertGreaterEqual(result['''test_rougeL'''] , 7 ) self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 ) @slow def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_mlm_flax.main() __UpperCamelCase = get_results(SCREAMING_SNAKE_CASE_ ) self.assertLess(result['''eval_perplexity'''] , 42 ) @slow def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_ta_mlm_flax.main() __UpperCamelCase = get_results(SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 ) @slow def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = 7 if get_gpu_count() > 1 else 2 __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_flax_ner.main() __UpperCamelCase = get_results(SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 ) self.assertGreaterEqual(result['''eval_f1'''] , 0.3 ) @slow def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = F"\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n ".split() with patch.object(SCREAMING_SNAKE_CASE_ , '''argv''' , SCREAMING_SNAKE_CASE_ ): run_qa.main() __UpperCamelCase = get_results(SCREAMING_SNAKE_CASE_ ) self.assertGreaterEqual(result['''eval_f1'''] , 30 ) self.assertGreaterEqual(result['''eval_exact'''] , 30 )
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) lowercase__ : Optional[Any] = ["names", "prefix"] lowercase__ : List[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] lowercase__ : Optional[Any] = ["encoding_errors", "on_bad_lines"] lowercase__ : List[str] = ["date_format"] @dataclass class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ): """simple docstring""" _snake_case = "," _snake_case = None _snake_case = "infer" _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = False _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = True _snake_case = False _snake_case = True _snake_case = None _snake_case = "." _snake_case = None _snake_case = '"' _snake_case = 0 _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = True _snake_case = 0 _snake_case = True _snake_case = False _snake_case = None _snake_case = 10000 _snake_case = None _snake_case = "strict" _snake_case = "error" _snake_case = None def A__ ( self )-> Any: '''simple docstring''' if self.delimiter is not None: __UpperCamelCase = self.delimiter if self.column_names is not None: __UpperCamelCase = self.column_names @property def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not 
(datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ): """simple docstring""" _snake_case = CsvConfig def A__ ( self )-> Any: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) __UpperCamelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ): __UpperCamelCase = data_files if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = [files] __UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __UpperCamelCase = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = [files] __UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) ) return splits def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.Table: '''simple docstring''' if self.config.features is not None: __UpperCamelCase = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE_ ) for feature in self.config.features.values() ): # cheaper cast __UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __UpperCamelCase = table_cast(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return pa_table def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __UpperCamelCase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE_ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ): __UpperCamelCase = pd.read_csv(SCREAMING_SNAKE_CASE_ , iterator=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = pa.Table.from_pandas(SCREAMING_SNAKE_CASE_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ ) except ValueError as e: logger.error(F"Failed to read file '{file}' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}" ) raise
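# Illustrative sketch of how this builder is reached in practice: users never
# instantiate the Csv builder directly, they go through `load_dataset("csv", ...)`.
# The file path below is a placeholder and must exist for the sketch to run.
if __name__ == "__main__":
    import datasets

    ds = datasets.load_dataset("csv", data_files={"train": "train.csv"})
    print(ds["train"].features)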
def solution(limit: int = 1_000_000) -> int:
    """Return the sum of Euler's totient function over 2..limit, computed with a
    prime sieve. This counts the reduced proper fractions with denominator <= limit."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
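# Sanity-check sketch at a small limit: for denominators up to 8 there are 21
# reduced proper fractions (phi(2) + ... + phi(8) = 1+2+2+4+2+6+4 = 21).
if __name__ == "__main__":
    print(solution(8))  # expected: 21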
"""Search for odd composites that cannot be written as the sum of a prime and
twice a square (Goldbach's other conjecture)."""

from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first `n` odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the smallest odd composite that violates the conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union

from packaging import version

from ..utils import is_torch_available, logging


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")

        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")

        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")

        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")

        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")

        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")

        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}

        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
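# Illustrative sketch: a typical 4-bit quantization config passed through
# `from_pretrained`; the model name is only an example and downloading weights
# plus a working bitsandbytes install are assumed.
if __name__ == "__main__":
    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_use_double_quant=True,
        bnb_4bit_compute_dtype=torch.bfloat16,
    )
    model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)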
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between `fnc`, the x axis, and the lines
    x = x_start and x = x_end using the trapezoidal rule."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2

        # Increment step
        xa = xa_next
        fxa = fxa_next
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
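# Accuracy sketch: the estimate converges to the exact integral as the step
# count grows; for f(x) = x^2 on [0, 3] the true area is 9.
if __name__ == "__main__":
    estimate = trapezoidal_area(lambda x: x**2, 0, 3, steps=1_000)
    print(f"estimate={estimate:.5f} (exact 9.0)")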
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled,
            # so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
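# Added usage sketch (not part of the upstream test file): TFBertTokenizer runs
# tokenization inside the TensorFlow graph, which is what the export test above
# relies on. The checkpoint name is one of the checkpoints listed above.
if __name__ == "__main__":
    if is_tf_available() and is_tensorflow_text_available():
        tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
        batch = tf_tokenizer(tf.constant(["An in-graph tokenization example."]))
        print(batch["input_ids"])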
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
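# Added usage sketch (not part of the conversion script). A typical invocation and
# reload; the script filename and dump folder below are placeholder assumptions:
#
#     python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small
#
#     from transformers import MusicgenForConditionalGeneration, MusicgenProcessor
#     model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small")
#     processor = MusicgenProcessor.from_pretrained("./musicgen-small")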
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63,
    90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267,
    1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467,
    4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
    11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
    17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
    34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63,
    90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350,
    1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667,
    6585, 6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562,
    13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075,
    21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470,
    36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
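# Added illustration (not part of the upstream module): the attribute_map above
# aliases `hidden_size` to `d_model`, so both names resolve to the same value.
def _example_whisper_config() -> WhisperConfig:
    config = WhisperConfig()
    assert config.hidden_size == config.d_model == 256
    return config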
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
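# Added note (illustrative, not part of the example script): this script is
# normally launched through the `accelerate` CLI, as the header comment's readme
# link describes. The filename `tracking.py` below is a placeholder assumption:
#
#     accelerate launch tracking.py --with_tracking --project_dir ./logs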
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch

import numpy as np
import pytest
from absl.testing import parameterized

import datasets
from datasets import load_metric

from .utils import for_all_test_methods, local, slow


# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        _ = "[...]"  # elided in the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        _ = "[...]"  # elided in the source
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which are supposed to download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
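# Added sketch: `register_intensive_calls_patcher` above accepts a patcher for any
# metric whose doctest would otherwise trigger an expensive model call. The metric
# name and patch target below are hypothetical placeholders:
#
#     @LocalMetricTest.register_intensive_calls_patcher("my_metric")
#     def patch_my_metric(module_name):
#         with patch("my_metric_package.expensive_forward") as mock_forward:
#             mock_forward.return_value = 0.5
#             yield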
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()


if __name__ == "__main__":
    params = {
        "title": (
            "Precisely geometry controlled microsupercapacitors for ultrahigh areal "
            "capacitance, volumetric capacitance, and energy density"
        ),
        "journal": "Chem. Mater.",
        "volume": 30,
        "pages": "3979-3990",
        "year": 2018,
        "hl": "en",
    }
    print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
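# Added illustration (not part of the upstream module): `d_head` is always derived
# as d_model // n_head, which is why a conflicting `d_head` kwarg raises above.
def _example_xlnet_config() -> XLNetConfig:
    config = XLNetConfig(d_model=1024, n_head=16)
    assert config.d_head == 64
    return config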
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
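# Added usage sketch: driving the processor directly. `images` may be PIL images,
# numpy arrays, or framework tensors; returning PyTorch tensors assumes torch is
# installed. The class name follows the deobfuscated module above.
def _example_preprocess(images):
    image_processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
    return image_processor(images=images, return_tensors="pt")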
from typing import Any class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self )-> str: '''simple docstring''' return F"Node({self.data})" class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = None def __iter__( self )-> Any: '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self )-> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self )-> str: '''simple docstring''' return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] ) def __getitem__( self , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) __UpperCamelCase = self.head for _ in range(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = current.next __UpperCamelCase = data def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(0 , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) __UpperCamelCase = Node(SCREAMING_SNAKE_CASE_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def A__ ( self )-> None: # print every node data '''simple docstring''' print(self ) def A__ ( self )-> Any: '''simple docstring''' return self.delete_nth(0 ) def A__ ( self )-> Any: # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def A__ ( self , SCREAMING_SNAKE_CASE_ = 0 )-> Any: '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def A__ ( self )-> bool: '''simple docstring''' return self.head is None def A__ ( self )-> None: '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. 
__UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(snake_case ) == i linked_list.insert_nth(snake_case , i + 1 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(snake_case ) == 9 assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(snake_case ) == "->".join(str(snake_case ) for i in range(-8 , 1 ) ) def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = [ -9, 100, Node(77345112 ), '''dlrow olleH''', 7, 5555, 0, -192.55555, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(snake_case ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def A_ ( ) -> Any: '''simple docstring''' from doctest import testmod testmod() __UpperCamelCase = LinkedList() 
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(snake_case ) print('''\nReading/changing Node data using indexing:''' ) print(f"Element at Position 1: {linked_list[1]}" ) __UpperCamelCase = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(snake_case ) print(f"length of linked_list is : {len(snake_case )}" ) if __name__ == "__main__": main()
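# --- Added sketch (not from the original file): the same in-place reversal idea
# with plain nodes. `ListNode` and `reverse` are hypothetical names for illustration.
class ListNode:
    def __init__(self, data):
        self.data = data
        self.next = None


def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point the current node backwards
        prev = current            # advance prev
        current = next_node       # advance current
    return prev                   # prev is the new head


head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
node = reverse(head)
while node:
    print(node.data)  # prints 3, 2, 1
    node = node.next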
from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata lowercase__ : Union[str, Any] = "" if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"): class SCREAMING_SNAKE_CASE__ ( tr.AbstractTransform ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ = " " )-> str: '''simple docstring''' __UpperCamelCase = sentence_delimiter def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[Any]: '''simple docstring''' return list(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> int: '''simple docstring''' __UpperCamelCase = [] for sent_idx, sentence in enumerate(SCREAMING_SNAKE_CASE_ ): chars.extend(self.process_string(SCREAMING_SNAKE_CASE_ ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(SCREAMING_SNAKE_CASE_ ) - 1: chars.append(self.sentence_delimiter ) return chars lowercase__ : Optional[int] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: lowercase__ : Optional[Any] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) lowercase__ : Optional[Any] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n" lowercase__ : Optional[int] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n" lowercase__ : str = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def A__ ( self )-> Union[str, Any]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', '''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''', ] , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False )-> List[str]: '''simple docstring''' if concatenate_texts: return jiwer.compute_measures( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truth_transform=SCREAMING_SNAKE_CASE_ , hypothesis_transform=SCREAMING_SNAKE_CASE_ , )["wer"] __UpperCamelCase = 0 __UpperCamelCase = 0 for prediction, reference in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = jiwer.compute_measures( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , truth_transform=SCREAMING_SNAKE_CASE_ , hypothesis_transform=SCREAMING_SNAKE_CASE_ , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
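# --- Added sketch (independent of jiwer): the CER formula the metric computes,
# CER = (S + D + I) / N, obtained here from a character-level Levenshtein distance.
def levenshtein(ref: str, hyp: str) -> int:
    prev = list(range(len(hyp) + 1))
    for i, r in enumerate(ref, 1):
        cur = [i]
        for j, h in enumerate(hyp, 1):
            cur.append(min(prev[j] + 1,              # deletion
                           cur[j - 1] + 1,           # insertion
                           prev[j - 1] + (r != h)))  # substitution (or match)
        prev = cur
    return prev[-1]


def char_error_rate(reference: str, prediction: str) -> float:
    return levenshtein(reference, prediction) / len(reference)


print(char_error_rate("this is the reference", "this is the prediction"))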
import math


def perfect_square(num: int) -> bool:
    """Check if num is a perfect square via float sqrt (can misbehave for very large ints)."""
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if n is a perfect square using binary search over candidate roots."""
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
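# --- Added note-in-code: the float-sqrt check above can misreport very large
# perfect squares due to rounding; math.isqrt (Python 3.8+) is exact.
import math


def perfect_square_isqrt(num: int) -> bool:
    return num >= 0 and math.isqrt(num) ** 2 == num


assert perfect_square_isqrt(16) and not perfect_square_isqrt(15)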
import glob import os import random from string import ascii_lowercase, digits import cva import numpy as np # Parrameters lowercase__ : Union[str, Any] = (7_2_0, 1_2_8_0) # Height, Width lowercase__ : Optional[int] = (0.4, 0.6) # if height or width lower than this scale, drop it. lowercase__ : List[Any] = 1 / 1_0_0 lowercase__ : str = "" lowercase__ : str = "" lowercase__ : List[Any] = "" lowercase__ : List[str] = 2_5_0 def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_dataset(snake_case , snake_case ) for index in range(snake_case ): __UpperCamelCase = random.sample(range(len(snake_case ) ) , 4 ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = update_image_and_anno( snake_case , snake_case , snake_case , snake_case , snake_case , filter_scale=snake_case , ) # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __UpperCamelCase = random_chars(32 ) __UpperCamelCase = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0] __UpperCamelCase = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}" cva.imwrite(f"{file_root}.jpg" , snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" ) __UpperCamelCase = [] for anno in new_annos: __UpperCamelCase = anno[3] - anno[1] __UpperCamelCase = anno[4] - anno[2] __UpperCamelCase = anno[1] + width / 2 __UpperCamelCase = anno[2] + height / 2 __UpperCamelCase = f"{anno[0]} {x_center} {y_center} {width} {height}" annos_list.append(snake_case ) with open(f"{file_root}.txt" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def A_ ( snake_case : str , snake_case : str ) -> tuple[list, list]: '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = [] for label_file in glob.glob(os.path.join(snake_case , '''*.txt''' ) ): __UpperCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(snake_case ) as in_file: __UpperCamelCase = in_file.readlines() __UpperCamelCase = os.path.join(snake_case , f"{label_name}.jpg" ) __UpperCamelCase = [] for obj_list in obj_lists: __UpperCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) __UpperCamelCase = float(obj[1] ) - float(obj[3] ) / 2 __UpperCamelCase = float(obj[2] ) - float(obj[4] ) / 2 __UpperCamelCase = float(obj[1] ) + float(obj[3] ) / 2 __UpperCamelCase = float(obj[2] ) + float(obj[4] ) / 2 boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] ) if not boxes: continue img_paths.append(snake_case ) labels.append(snake_case ) return img_paths, labels def A_ ( snake_case : list , snake_case : list , snake_case : list[int] , snake_case : tuple[int, int] , snake_case : tuple[float, float] , snake_case : float = 0.0 , ) -> tuple[list, list, str]: '''simple docstring''' __UpperCamelCase = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta ) __UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __UpperCamelCase = scale_range[0] + random.random() * (scale_range[1] - scale_range[0]) __UpperCamelCase = int(scale_x * output_size[1] ) __UpperCamelCase = int(scale_y * output_size[0] ) __UpperCamelCase = [] __UpperCamelCase = [] for i, index in enumerate(snake_case ): __UpperCamelCase = all_img_list[index] path_list.append(snake_case ) __UpperCamelCase = all_annos[index] __UpperCamelCase = cva.imread(snake_case ) if i == 0: # top-left __UpperCamelCase = cva.resize(snake_case , (divid_point_x, divid_point_y) ) __UpperCamelCase = img for bbox in img_annos: __UpperCamelCase = bbox[1] * scale_x __UpperCamelCase = bbox[2] * scale_y 
__UpperCamelCase = bbox[3] * scale_x __UpperCamelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 1: # top-right __UpperCamelCase = cva.resize(snake_case , (output_size[1] - divid_point_x, divid_point_y) ) __UpperCamelCase = img for bbox in img_annos: __UpperCamelCase = scale_x + bbox[1] * (1 - scale_x) __UpperCamelCase = bbox[2] * scale_y __UpperCamelCase = scale_x + bbox[3] * (1 - scale_x) __UpperCamelCase = bbox[4] * scale_y new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) elif i == 2: # bottom-left __UpperCamelCase = cva.resize(snake_case , (divid_point_x, output_size[0] - divid_point_y) ) __UpperCamelCase = img for bbox in img_annos: __UpperCamelCase = bbox[1] * scale_x __UpperCamelCase = scale_y + bbox[2] * (1 - scale_y) __UpperCamelCase = bbox[3] * scale_x __UpperCamelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) else: # bottom-right __UpperCamelCase = cva.resize( snake_case , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) ) __UpperCamelCase = img for bbox in img_annos: __UpperCamelCase = scale_x + bbox[1] * (1 - scale_x) __UpperCamelCase = scale_y + bbox[2] * (1 - scale_y) __UpperCamelCase = scale_x + bbox[3] * (1 - scale_x) __UpperCamelCase = scale_y + bbox[4] * (1 - scale_y) new_anno.append([bbox[0], xmin, ymin, xmax, ymax] ) # Remove bounding box small than scale of filter if filter_scale > 0: __UpperCamelCase = [ anno for anno in new_anno if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2]) ] return output_img, new_anno, path_list[0] def A_ ( snake_case : int ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" __UpperCamelCase = ascii_lowercase + digits return "".join(random.choice(snake_case ) for _ in range(snake_case ) ) if __name__ == "__main__": main() print("DONE ✅")
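# --- Added sketch (hypothetical helper name): the bbox remapping used above for
# the top-right quadrant, where x is squeezed into [scale_x, 1] and y into [0, scale_y].
def remap_top_right(bbox, scale_x, scale_y):
    cls, xmin, ymin, xmax, ymax = bbox
    return [
        cls,
        scale_x + xmin * (1 - scale_x),
        ymin * scale_y,
        scale_x + xmax * (1 - scale_x),
        ymax * scale_y,
    ]


# a full-image box lands exactly on the top-right quadrant:
print(remap_top_right([0, 0.0, 0.0, 1.0, 1.0], 0.5, 0.5))  # [0, 0.5, 0.0, 1.0, 0.5]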
def A_ ( ) -> list[list[int]]: '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowercase__ : List[str] = generate_large_matrix() lowercase__ : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A_ ( snake_case : list[list[int]] ) -> None: '''simple docstring''' assert all(row == sorted(snake_case , reverse=snake_case ) for row in grid ) assert all(list(snake_case ) == sorted(snake_case , reverse=snake_case ) for col in zip(*snake_case ) ) def A_ ( snake_case : list[int] ) -> int: '''simple docstring''' __UpperCamelCase = 0 __UpperCamelCase = len(snake_case ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: __UpperCamelCase = (left + right) // 2 __UpperCamelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: __UpperCamelCase = mid + 1 else: __UpperCamelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(snake_case ) def A_ ( snake_case : list[list[int]] ) -> int: '''simple docstring''' __UpperCamelCase = 0 __UpperCamelCase = len(grid[0] ) for i in range(len(snake_case ) ): __UpperCamelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(snake_case ) * len(grid[0] )) - total def A_ ( snake_case : list[list[int]] ) -> int: '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def A_ ( snake_case : list[list[int]] ) -> int: '''simple docstring''' __UpperCamelCase = 0 for row in grid: for i, number in enumerate(snake_case ): if number < 0: total += len(snake_case ) - i break return total def A_ ( ) -> None: '''simple docstring''' from timeit import timeit print('''Running benchmarks''' ) __UpperCamelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): __UpperCamelCase = timeit(f"{func}(grid=grid)" , setup=snake_case , number=500 ) print(f"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
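# --- Added sanity check (new code) for the binary-search contract above: on a row
# sorted in decreasing order it returns the index of the first negative value.
def find_first_negative(row: list) -> int:
    left, right = 0, len(row) - 1
    if not row or row[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        if row[mid] < 0 and row[mid - 1] >= 0:
            return mid
        if row[mid] >= 0:
            left = mid + 1
        else:
            right = mid - 1
    return len(row)


assert find_first_negative([4, 3, 2, -1]) == 3
assert find_first_negative([7, 7, 6]) == 3  # no negatives -> length of the row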
import numpy # List of input, output pairs lowercase__ : Union[str, Any] = ( ((5, 2, 3), 1_5), ((6, 5, 9), 2_5), ((1_1, 1_2, 1_3), 4_1), ((1, 1, 1), 8), ((1_1, 1_2, 1_3), 4_1), ) lowercase__ : Optional[int] = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0)) lowercase__ : int = [2, 4, 1, 5] lowercase__ : Any = len(train_data) lowercase__ : Optional[int] = 0.009 def A_ ( snake_case : Dict , snake_case : Optional[int]="train" ) -> Union[str, Any]: '''simple docstring''' return calculate_hypothesis_value(snake_case , snake_case ) - output( snake_case , snake_case ) def A_ ( snake_case : Optional[Any] ) -> Optional[int]: '''simple docstring''' __UpperCamelCase = 0 for i in range(len(snake_case ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def A_ ( snake_case : int , snake_case : Tuple ) -> Union[str, Any]: '''simple docstring''' if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def A_ ( snake_case : Tuple , snake_case : str ) -> Dict: '''simple docstring''' if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def A_ ( snake_case : List[Any] , snake_case : int=m ) -> int: '''simple docstring''' __UpperCamelCase = 0 for i in range(snake_case ): if index == -1: summation_value += _error(snake_case ) else: summation_value += _error(snake_case ) * train_data[i][0][index] return summation_value def A_ ( snake_case : Optional[Any] ) -> Dict: '''simple docstring''' __UpperCamelCase = summation_of_cost_derivative(snake_case , snake_case ) / m return cost_derivative_value def A_ ( ) -> Union[str, Any]: '''simple docstring''' global parameter_vector # Tune these values to set a tolerance value for predicted output __UpperCamelCase = 0.000002 __UpperCamelCase = 0 __UpperCamelCase = 0 while True: j += 1 __UpperCamelCase = [0, 0, 0, 0] for i in range(0 , len(snake_case ) ): __UpperCamelCase = get_cost_derivative(i - 1 ) __UpperCamelCase = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( snake_case , snake_case , atol=snake_case , rtol=snake_case , ): break __UpperCamelCase = temp_parameter_vector print(('''Number of iterations:''', j) ) def A_ ( ) -> Union[str, Any]: '''simple docstring''' for i in range(len(snake_case ) ): print(('''Actual output value:''', output(snake_case , '''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(snake_case , '''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("\nTesting gradient descent for a linear hypothesis function.\n") test_gradient_descent()
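# --- Added compact sketch (simplified, hypothetical names) of the update rule the
# script implements: theta_i <- theta_i - lr * (1/m) * sum_j error_j * x_j[i],
# with theta[0] acting as the bias term.
def gradient_descent_step(theta, xs, ys, lr):
    m = len(xs)
    preds = [theta[0] + sum(t * x for t, x in zip(theta[1:], row)) for row in xs]
    errors = [p - y for p, y in zip(preds, ys)]
    new_theta = [theta[0] - lr * sum(errors) / m]  # bias update
    for i in range(len(theta) - 1):
        grad = sum(e * row[i] for e, row in zip(errors, xs)) / m
        new_theta.append(theta[i + 1] - lr * grad)
    return new_theta


theta = [0.0, 0.0, 0.0, 0.0]
xs = [(5, 2, 3), (6, 5, 9), (1, 1, 1)]
ys = [15, 25, 8]
for _ in range(2000):
    theta = gradient_descent_step(theta, xs, ys, lr=0.005)
print(theta)  # fitted parameters after 2000 steps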
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ : str = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = size if size is not None else {'''shortest_edge''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = crop_pct __UpperCamelCase = resample __UpperCamelCase = do_center_crop __UpperCamelCase = crop_size __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) if crop_pct is not None: if "shortest_edge" in size: __UpperCamelCase = int(size['''shortest_edge'''] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __UpperCamelCase = int(size['''height'''] / crop_pct ) else: __UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct )) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) else: if "shortest_edge" in size: __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ ) elif "height" in size and "width" in size: __UpperCamelCase = (size['''height'''], size['''width''']) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str: '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image: '''simple docstring''' __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = size if size is not None 
else self.size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else self.crop_size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_pct is None: raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: __UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
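# --- Added sketch of the crop_pct arithmetic used in resize(): to center-crop to
# 224 afterwards, the shorter edge is first rescaled to int(224 / crop_pct).
crop_pct = 0.9
crop_size = 224
shortest_edge = int(crop_size / crop_pct)
print(shortest_edge)  # 248, i.e. resize the shorter edge to 248, then center-crop 224x224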
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @slow @require_torch def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained('''prajjwal1/bert-tiny''' , '''prajjwal1/bert-tiny''' ) __UpperCamelCase = BertTokenizer.from_pretrained('''bert-base-uncased''' ) __UpperCamelCase = bertabert.config.encoder.vocab_size __UpperCamelCase = tokenizer.sep_token_id __UpperCamelCase = tokenizer.cls_token_id __UpperCamelCase = 128 __UpperCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''train[:1%]''' ) __UpperCamelCase = datasets.load_dataset('''cnn_dailymail''' , '''3.0.0''' , split='''validation[:1%]''' ) __UpperCamelCase = train_dataset.select(range(32 ) ) __UpperCamelCase = val_dataset.select(range(16 ) ) __UpperCamelCase = 4 def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE_ ): # Tokenizer will automatically set [BOS] <text> [EOS] __UpperCamelCase = tokenizer(batch['''article'''] , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=512 ) __UpperCamelCase = tokenizer(batch['''highlights'''] , padding='''max_length''' , truncation=SCREAMING_SNAKE_CASE_ , max_length=128 ) __UpperCamelCase = inputs.input_ids __UpperCamelCase = inputs.attention_mask __UpperCamelCase = outputs.input_ids __UpperCamelCase = outputs.input_ids.copy() __UpperCamelCase = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels'''] ] __UpperCamelCase = outputs.attention_mask assert all(len(SCREAMING_SNAKE_CASE_ ) == 512 for x in inputs.input_ids ) assert all(len(SCREAMING_SNAKE_CASE_ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = pred.label_ids __UpperCamelCase = pred.predictions # all unnecessary tokens are removed __UpperCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE_ ) )] ) / len(SCREAMING_SNAKE_CASE_ ) return {"accuracy": accuracy} # map train dataset __UpperCamelCase = train_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , remove_columns=['''article''', '''highlights'''] , ) train_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) # same for validation dataset __UpperCamelCase = val_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , remove_columns=['''article''', '''highlights'''] , ) val_dataset.set_format( type='''torch''' , columns=['''input_ids''', '''attention_mask''', '''decoder_input_ids''', '''decoder_attention_mask''', '''labels'''] , ) __UpperCamelCase = self.get_auto_remove_tmp_dir() __UpperCamelCase = SeqaSeqTrainingArguments( output_dir=SCREAMING_SNAKE_CASE_ , per_device_train_batch_size=SCREAMING_SNAKE_CASE_ , per_device_eval_batch_size=SCREAMING_SNAKE_CASE_ , 
predict_with_generate=SCREAMING_SNAKE_CASE_ , evaluation_strategy='''steps''' , do_train=SCREAMING_SNAKE_CASE_ , do_eval=SCREAMING_SNAKE_CASE_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer __UpperCamelCase = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE_ , args=SCREAMING_SNAKE_CASE_ , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE_ , eval_dataset=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , ) # start training trainer.train()
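# --- Added sketch (new code) of the label masking done in the mapping function:
# pad-token positions are replaced with -100 so the cross-entropy loss skips them.
pad_token_id = 0
labels = [[5, 9, 2, 0, 0], [7, 1, 0, 0, 0]]
masked = [[-100 if tok == pad_token_id else tok for tok in row] for row in labels]
print(masked)  # [[5, 9, 2, -100, -100], [7, 1, -100, -100, -100]]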
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowercase__ : Any = getLogger(__name__) lowercase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def A_ ( snake_case : List[str] , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : str = DEFAULT_DEVICE , snake_case : List[str]=False , snake_case : Union[str, Any]="summarization" , snake_case : str=None , **snake_case : List[Any] , ) -> Dict: '''simple docstring''' __UpperCamelCase = Path(snake_case ).open('''w''' , encoding='''utf-8''' ) __UpperCamelCase = str(snake_case ) __UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).to(snake_case ) if fpaa: __UpperCamelCase = model.half() __UpperCamelCase = AutoTokenizer.from_pretrained(snake_case ) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __UpperCamelCase = time.time() # update config with task specific params use_task_specific_params(snake_case , snake_case ) if prefix is None: __UpperCamelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(snake_case , snake_case ) ) ): __UpperCamelCase = [prefix + text for text in examples_chunk] __UpperCamelCase = tokenizer(snake_case , return_tensors='''pt''' , truncation=snake_case , padding='''longest''' ).to(snake_case ) __UpperCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case , ) __UpperCamelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __UpperCamelCase = int(time.time() - start_time ) # seconds __UpperCamelCase = len(snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A_ ( ) -> Tuple: '''simple docstring''' return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def A_ ( snake_case : str=True ) -> int: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=snake_case , required=snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=snake_case , required=snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=snake_case , required=snake_case , default=snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=snake_case , required=snake_case , default=snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=snake_case , default=8 , required=snake_case , help='''batch size''' ) parser.add_argument( 
'''--n_obs''' , type=snake_case , default=-1 , required=snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __UpperCamelCase , __UpperCamelCase = parser.parse_known_args() __UpperCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case ) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}" ) __UpperCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __UpperCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __UpperCamelCase = generate_summaries_or_translations( snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case , ) if args.reference_path is None: return {} # Compute scores __UpperCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge __UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] __UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case )] __UpperCamelCase = score_fn(snake_case , snake_case ) scores.update(snake_case ) if args.dump_args: scores.update(snake_case ) if args.info: __UpperCamelCase = args.info if verbose: print(snake_case ) if args.score_path is not None: json.dump(snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
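# --- Added sketch: an assumed implementation of the `chunks` helper imported from
# utils above (the real one may differ), batching the input examples for generate().
def chunks(seq, n):
    for i in range(0, len(seq), n):
        yield seq[i:i + n]


print(list(chunks(list(range(7)), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]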
import argparse import json import numpy import torch from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def A_ ( snake_case : Any , snake_case : Optional[int] ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = torch.load(snake_case , map_location='''cpu''' ) __UpperCamelCase = chkpt['''model'''] # We have the base model one level deeper than the original XLM repository __UpperCamelCase = {} for k, v in state_dict.items(): if "pred_layer" in k: __UpperCamelCase = v else: __UpperCamelCase = v __UpperCamelCase = chkpt['''params'''] __UpperCamelCase = {n: v for n, v in config.items() if not isinstance(snake_case , (torch.FloatTensor, numpy.ndarray) )} __UpperCamelCase = chkpt['''dico_word2id'''] __UpperCamelCase = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()} # Save pytorch-model __UpperCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME __UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME __UpperCamelCase = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file'''] print(f"Save PyTorch model to {pytorch_weights_dump_path}" ) torch.save(snake_case , snake_case ) print(f"Save configuration file to {pytorch_config_dump_path}" ) with open(snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(snake_case , indent=2 ) + '''\n''' ) print(f"Save vocab file to {pytorch_config_dump_path}" ) with open(snake_case , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(snake_case , indent=2 ) + '''\n''' ) if __name__ == "__main__": lowercase__ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) lowercase__ : Optional[Any] = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
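# --- Added sketch of the vocab rewrite above: BPE continuation markers '@@' are
# stripped, and tokens past the first 13 specials get a '</w>' end-of-word suffix.
vocab = {"<s>": 0, "hel@@": 14, "lo": 15}
converted = {
    (s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", "")): i
    for s, i in vocab.items()
}
print(converted)  # {'<s>': 0, 'hel': 14, 'lo</w>': 15}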
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
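# --- Added quick check: solution(10) is 27, since 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
from math import factorial

assert sum(int(x) for x in str(factorial(10))) == 27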
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def A_ ( snake_case : Tuple , snake_case : List[str] , snake_case : str=0 ) -> Optional[int]: '''simple docstring''' if name is None: __UpperCamelCase = None else: __UpperCamelCase = '''.''' * max(0 , spaces - 2 ) + '''# {:''' + str(50 - spaces ) + '''s}''' __UpperCamelCase = fmt.format(snake_case ) # Print and recurse (if needed). 
if isinstance(snake_case , snake_case ): if msg is not None: print(snake_case ) for k in val.keys(): recursive_print(snake_case , val[k] , spaces + 2 ) elif isinstance(snake_case , torch.Tensor ): print(snake_case , ''':''' , val.size() ) else: print(snake_case , ''':''' , snake_case ) def A_ ( snake_case : int , snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] , snake_case : List[str] ) -> int: '''simple docstring''' __UpperCamelCase = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] __UpperCamelCase = (num_heads, hidden_size, num_splits) + input_shape[1:] __UpperCamelCase = param.view(*snake_case ) __UpperCamelCase = param.transpose(0 , 2 ) __UpperCamelCase = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] __UpperCamelCase = (num_heads, num_splits, hidden_size) + input_shape[1:] __UpperCamelCase = param.view(*snake_case ) __UpperCamelCase = param.transpose(0 , 1 ).contiguous() __UpperCamelCase = param.view(*snake_case ) return param def A_ ( snake_case : List[Any] , snake_case : List[str] , snake_case : Tuple ) -> List[str]: '''simple docstring''' __UpperCamelCase = {} # old versions did not store training args __UpperCamelCase = input_state_dict.get('''args''' , snake_case ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) __UpperCamelCase = ds_args.padded_vocab_size __UpperCamelCase = ds_args.max_position_embeddings __UpperCamelCase = ds_args.hidden_size __UpperCamelCase = ds_args.num_layers __UpperCamelCase = ds_args.num_attention_heads __UpperCamelCase = ds_args.ffn_hidden_size # pprint(config) # The number of heads. __UpperCamelCase = config.n_head # The hidden_size per head. __UpperCamelCase = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): __UpperCamelCase = input_state_dict['''checkpoint_version'''] else: __UpperCamelCase = 0.0 # The model. __UpperCamelCase = input_state_dict['''model'''] # The language model. __UpperCamelCase = model['''language_model'''] # The embeddings. __UpperCamelCase = lm['''embedding'''] # The word embeddings. __UpperCamelCase = embeddings['''word_embeddings''']['''weight'''] # Truncate the embedding table to vocab_size rows. __UpperCamelCase = word_embeddings[: config.vocab_size, :] __UpperCamelCase = word_embeddings # The position embeddings. __UpperCamelCase = embeddings['''position_embeddings''']['''weight'''] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] __UpperCamelCase = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match" ) # Store the position embeddings. __UpperCamelCase = pos_embeddings # The transformer. __UpperCamelCase = lm['''transformer'''] if '''transformer''' in lm.keys() else lm['''encoder'''] # The regex to extract layer names. __UpperCamelCase = re.compile(r'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''' ) # The simple map of names for "automated" rules. __UpperCamelCase = { '''attention.dense''': '''.attn.c_proj.''', '''self_attention.dense''': '''.attn.c_proj.''', '''mlp.dense_h_to_4h''': '''.mlp.c_fc.''', '''mlp.dense_4h_to_h''': '''.mlp.c_proj.''', } # Extract the layers. 
for key, val in transformer.items(): # Match the name. __UpperCamelCase = layer_re.match(snake_case ) # Stop if that's not a layer if m is None: break # The index of the layer. __UpperCamelCase = int(m.group(1 ) ) # The name of the operation. __UpperCamelCase = m.group(2 ) # Is it a weight or a bias? __UpperCamelCase = m.group(3 ) # The name of the layer. __UpperCamelCase = f"transformer.h.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith('''layernorm''' ): __UpperCamelCase = '''ln_1''' if op_name.startswith('''input''' ) else '''ln_2''' __UpperCamelCase = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. __UpperCamelCase = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , snake_case , snake_case ) __UpperCamelCase = causal_mask # Insert a "dummy" tensor for masked_bias. __UpperCamelCase = torch.tensor(-1e4 , dtype=torch.floataa ) __UpperCamelCase = masked_bias __UpperCamelCase = fix_query_key_value_ordering(snake_case , snake_case , 3 , snake_case , snake_case ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. __UpperCamelCase = out_val.transpose(0 , 1 ).contiguous() # Store. __UpperCamelCase = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": __UpperCamelCase = fix_query_key_value_ordering(snake_case , snake_case , 3 , snake_case , snake_case ) # Store. No change of shape. __UpperCamelCase = out_val # Transpose the weights. elif weight_or_bias == "weight": __UpperCamelCase = megatron_to_transformers[op_name] __UpperCamelCase = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": __UpperCamelCase = megatron_to_transformers[op_name] __UpperCamelCase = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. __UpperCamelCase = transformer['''final_layernorm.weight'''] __UpperCamelCase = transformer['''final_layernorm.bias'''] # For LM head, transformers' wants the matrix to weight embeddings. __UpperCamelCase = word_embeddings # It should be done! return output_state_dict def A_ ( ) -> int: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''' ) parser.add_argument( '''path_to_checkpoint''' , type=snake_case , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , ) parser.add_argument( '''--config_file''' , default='''''' , type=snake_case , help='''An optional config json file describing the pre-trained model.''' , ) __UpperCamelCase = parser.parse_args() # Extract the basename. __UpperCamelCase = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}" ) if args.path_to_checkpoint.endswith('''.zip''' ): with zipfile.ZipFile(args.path_to_checkpoint , '''r''' ) as checkpoint: with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''' ) as pytorch_dict: __UpperCamelCase = torch.load(snake_case , map_location='''cpu''' ) else: __UpperCamelCase = torch.load(args.path_to_checkpoint , map_location='''cpu''' ) __UpperCamelCase = input_state_dict.get('''args''' , snake_case ) # Read the config, or default to the model released by NVIDIA. 
if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: __UpperCamelCase = '''gelu_fast''' elif ds_args.openai_gelu: __UpperCamelCase = '''gelu_new''' else: __UpperCamelCase = '''gelu''' else: # in the very early days this used to be "gelu_new" __UpperCamelCase = '''gelu_new''' # Spell out all parameters in case the defaults change. __UpperCamelCase = GPTaConfig( vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=snake_case , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='''cls_index''' , summary_use_proj=snake_case , summary_activation=snake_case , summary_proj_to_labels=snake_case , summary_first_dropout=0.1 , scale_attn_weights=snake_case , use_cache=snake_case , bos_token_id=50256 , eos_token_id=50256 , ) else: __UpperCamelCase = GPTaConfig.from_json_file(args.config_file ) __UpperCamelCase = ['''GPT2LMHeadModel'''] # Convert. print('''Converting''' ) __UpperCamelCase = convert_megatron_checkpoint(snake_case , snake_case , snake_case ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(snake_case , snake_case ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: __UpperCamelCase = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": __UpperCamelCase = '''gpt2''' elif tokenizer_type == "PretrainedFromHF": __UpperCamelCase = ds_args.tokenizer_name_or_path else: raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}" ) else: __UpperCamelCase = '''gpt2''' __UpperCamelCase = AutoTokenizer.from_pretrained(snake_case ) __UpperCamelCase = type(snake_case ).__name__ __UpperCamelCase = tokenizer_class # Store the config to file. print('''Saving config''' ) config.save_pretrained(snake_case ) # Save tokenizer based on args print(f"Adding {tokenizer_class} tokenizer files" ) tokenizer.save_pretrained(snake_case ) # Store the state_dict to file. __UpperCamelCase = os.path.join(snake_case , '''pytorch_model.bin''' ) print(f"Saving checkpoint to \"{output_checkpoint_file}\"" ) torch.save(snake_case , snake_case ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
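# --- Added shape-only sketch (illustrative sizes) of the checkpoint-version-2
# QKV reordering above: view as [heads, splits, head_dim, D], swap the first two
# axes, then flatten back.
import torch

num_heads, num_splits, head_dim, d_model = 2, 3, 4, 8
param = torch.arange(num_heads * num_splits * head_dim * d_model, dtype=torch.float32)
param = param.view(num_heads * num_splits * head_dim, d_model)
out = (
    param.view(num_heads, num_splits, head_dim, d_model)
    .transpose(0, 1)
    .contiguous()
    .view(num_heads * num_splits * head_dim, d_model)
)
print(out.shape)  # torch.Size([24, 8])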
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, locating each insertion point with binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
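# --- Added quick demo of the sort above:
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]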
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): """simple docstring""" @register_to_config def __init__( self , *, SCREAMING_SNAKE_CASE_ = 4 , SCREAMING_SNAKE_CASE_ = 768 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )-> Optional[int]: '''simple docstring''' super().__init__() __UpperCamelCase = nn.Parameter(torch.zeros(SCREAMING_SNAKE_CASE_ ) ) # parameters for additional clip time embeddings __UpperCamelCase = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # parameters for encoder hidden states __UpperCamelCase = clip_extra_context_tokens __UpperCamelCase = nn.Linear( SCREAMING_SNAKE_CASE_ , self.clip_extra_context_tokens * cross_attention_dim ) __UpperCamelCase = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = nn.LayerNorm(SCREAMING_SNAKE_CASE_ ) def A__ ( self , *, SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings __UpperCamelCase = image_embeddings.shape[0] __UpperCamelCase = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) __UpperCamelCase = classifier_free_guidance_embeddings.expand( SCREAMING_SNAKE_CASE_ , -1 ) __UpperCamelCase = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] __UpperCamelCase = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... __UpperCamelCase = self.embedding_proj(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.clip_image_embeddings_project_to_time_embeddings(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" __UpperCamelCase = self.clip_extra_context_tokens_proj(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = clip_extra_context_tokens.reshape(SCREAMING_SNAKE_CASE_ , -1 , self.clip_extra_context_tokens ) __UpperCamelCase = clip_extra_context_tokens.permute(0 , 2 , 1 ) __UpperCamelCase = self.encoder_hidden_states_proj(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.text_encoder_hidden_states_norm(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
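# --- Added shape-only sketch of the "extra context tokens" path above: a
# (batch, tokens * dim) projection is reshaped and permuted into
# (batch, tokens, dim) tokens for the text encoder hidden states.
import torch

batch, tokens, dim = 2, 4, 768
flat = torch.zeros(batch, tokens * dim)
ctx = flat.reshape(batch, -1, tokens).permute(0, 2, 1)
print(ctx.shape)  # torch.Size([2, 4, 768])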
from __future__ import annotations from collections import deque class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ )-> int: '''simple docstring''' __UpperCamelCase = [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(SCREAMING_SNAKE_CASE_ ) self.set_fail_transitions() def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> int | None: '''simple docstring''' for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' __UpperCamelCase = 0 for character in keyword: __UpperCamelCase = self.find_next_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) __UpperCamelCase = len(self.adlist ) - 1 else: __UpperCamelCase = next_state self.adlist[current_state]["output"].append(SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> None: '''simple docstring''' __UpperCamelCase = deque() for node in self.adlist[0]["next_states"]: q.append(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = 0 while q: __UpperCamelCase = q.popleft() for child in self.adlist[r]["next_states"]: q.append(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.adlist[r]['''fail_state'''] while ( self.find_next_state(SCREAMING_SNAKE_CASE_ , self.adlist[child]['''value'''] ) is None and state != 0 ): __UpperCamelCase = self.adlist[state]['''fail_state'''] __UpperCamelCase = self.find_next_state( SCREAMING_SNAKE_CASE_ , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: __UpperCamelCase = 0 __UpperCamelCase = ( self.adlist[child]['''output'''] + self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> dict[str, list[int]]: '''simple docstring''' __UpperCamelCase = {} # returns a dict with keywords and list of its occurrences __UpperCamelCase = 0 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): while ( self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] ) is None and current_state != 0 ): __UpperCamelCase = self.adlist[current_state]['''fail_state'''] __UpperCamelCase = self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] ) if next_state is None: __UpperCamelCase = 0 else: __UpperCamelCase = next_state for key in self.adlist[current_state]["output"]: if key not in result: __UpperCamelCase = [] result[key].append(i - len(SCREAMING_SNAKE_CASE_ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
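# --- Added naive baseline (new code) for the same multi-pattern search; handy for
# sanity-checking the Aho-Corasick automaton's output on small inputs.
def naive_multi_search(text: str, keywords: list) -> dict:
    hits = {}
    for kw in keywords:
        starts = [
            i for i in range(len(text) - len(kw) + 1) if text[i : i + len(kw)] == kw
        ]
        if starts:
            hits[kw] = starts
    return hits


print(naive_multi_search("ushers", ["he", "she", "his", "hers"]))
# {'he': [2], 'she': [1], 'hers': [2]}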
import glob import os import random from string import ascii_lowercase, digits import cva lowercase__ : List[str] = "" lowercase__ : Optional[Any] = "" lowercase__ : str = "" lowercase__ : Tuple = 1 # (0 is vertical, 1 is horizontal) def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = get_dataset(snake_case , snake_case ) print('''Processing...''' ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = update_image_and_anno(snake_case , snake_case , snake_case ) for index, image in enumerate(snake_case ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __UpperCamelCase = random_chars(32 ) __UpperCamelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0] __UpperCamelCase = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}" cva.imwrite(f"/{file_root}.jpg" , snake_case , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"Success {index+1}/{len(snake_case )} with {file_name}" ) __UpperCamelCase = [] for anno in new_annos[index]: __UpperCamelCase = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}" annos_list.append(snake_case ) with open(f"/{file_root}.txt" , '''w''' ) as outfile: outfile.write('''\n'''.join(line for line in annos_list ) ) def A_ ( snake_case : str , snake_case : str ) -> tuple[list, list]: '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = [] for label_file in glob.glob(os.path.join(snake_case , '''*.txt''' ) ): __UpperCamelCase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0] with open(snake_case ) as in_file: __UpperCamelCase = in_file.readlines() __UpperCamelCase = os.path.join(snake_case , f"{label_name}.jpg" ) __UpperCamelCase = [] for obj_list in obj_lists: __UpperCamelCase = obj_list.rstrip('''\n''' ).split(''' ''' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(snake_case ) labels.append(snake_case ) return img_paths, labels def A_ ( snake_case : list , snake_case : list , snake_case : int = 1 ) -> tuple[list, list, list]: '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = [] __UpperCamelCase = [] for idx in range(len(snake_case ) ): __UpperCamelCase = [] __UpperCamelCase = img_list[idx] path_list.append(snake_case ) __UpperCamelCase = anno_list[idx] __UpperCamelCase = cva.imread(snake_case ) if flip_type == 1: __UpperCamelCase = cva.flip(snake_case , snake_case ) for bbox in img_annos: __UpperCamelCase = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __UpperCamelCase = cva.flip(snake_case , snake_case ) for bbox in img_annos: __UpperCamelCase = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(snake_case ) new_imgs_list.append(snake_case ) return new_imgs_list, new_annos_lists, path_list def A_ ( snake_case : int = 32 ) -> str: '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" __UpperCamelCase = ascii_lowercase + digits return "".join(random.choice(snake_case ) for _ in range(snake_case ) ) if __name__ == "__main__": main() print("DONE ✅")
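# --- Added one-line sketch of the horizontal-flip bbox update above: with
# normalized YOLO coordinates, mirroring only moves the x center to 1 - x.
bbox = [0, 0.25, 0.5, 0.2, 0.3]  # class, x_center, y_center, width, height
flipped = [bbox[0], 1 - bbox[1], bbox[2], bbox[3], bbox[4]]
print(flipped)  # [0, 0.75, 0.5, 0.2, 0.3]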
328
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> Dict: '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self )-> str: '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' __UpperCamelCase = 
DistilBertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' __UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , 
(__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _snake_case = ( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _snake_case = True _snake_case = True _snake_case = True _snake_case = True def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = DistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 ) def A__ ( self )-> Dict: '''simple docstring''' self.config_tester.run_common_tests() def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) @slow def A__ ( self )-> List[str]: '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __UpperCamelCase = True __UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) ) __UpperCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] __UpperCamelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.tensor( [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
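# A condensed, hedged sketch of what the DistilBERT model tester above
# exercises: build a tiny config, run one forward pass, and check the
# hidden-state shape. All sizes here are illustrative.
import torch
from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig(vocab_size=99, dim=32, n_layers=2, n_heads=4, hidden_dim=37)
model = DistilBertModel(config)
model.eval()

input_ids = torch.randint(0, config.vocab_size, (2, 7))  # (batch_size, seq_length)
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    outputs = model(input_ids, attention_mask=attention_mask)
print(outputs.last_hidden_state.shape)  # torch.Size([2, 7, 32])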
328
1
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin lowercase__ : Any = random.Random() if is_torch_available(): import torch def A_ ( snake_case : List[Any] , snake_case : str=1.0 , snake_case : Tuple=None , snake_case : Any=None ) -> Optional[Any]: '''simple docstring''' if rng is None: __UpperCamelCase = global_rng __UpperCamelCase = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=400 , SCREAMING_SNAKE_CASE_=2000 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=16000 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , )-> Tuple: '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = min_seq_length __UpperCamelCase = max_seq_length __UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) __UpperCamelCase = feature_size __UpperCamelCase = padding_value __UpperCamelCase = sampling_rate __UpperCamelCase = return_attention_mask __UpperCamelCase = do_normalize def A__ ( self )-> List[Any]: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def A__ ( self , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False )-> Union[str, Any]: '''simple docstring''' def _flatten(SCREAMING_SNAKE_CASE_ ): return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) ) if equal_length: __UpperCamelCase = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size __UpperCamelCase = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: __UpperCamelCase = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = ASTFeatureExtractor def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = ASTFeatureExtractionTester(self ) def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 __UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] __UpperCamelCase = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs] # Test not batched input __UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values __UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) # Test batched __UpperCamelCase = feat_extract(SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ).input_values __UpperCamelCase = feat_extract(SCREAMING_SNAKE_CASE_ , 
padding=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. __UpperCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)] __UpperCamelCase = np.asarray(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ).input_values __UpperCamelCase = feat_extract(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ).input_values for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) @require_torch def A__ ( self )-> str: '''simple docstring''' import torch __UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) __UpperCamelCase = np.random.rand(100 ).astype(np.floataa ) __UpperCamelCase = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: __UpperCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) __UpperCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' from datasets import load_dataset __UpperCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech __UpperCamelCase = ds.sort('''id''' ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] @require_torch def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] ) # fmt: on __UpperCamelCase = self._load_datasamples(1 ) __UpperCamelCase = ASTFeatureExtractor() __UpperCamelCase = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
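# Hedged sketch of the feature-extraction path the tests above cover: raw mono
# audio in, fixed-size log-mel spectrogram out. The waveform is random noise;
# the shapes are what matter (1024 frames x 128 mel bins by default).
import numpy as np
from transformers import ASTFeatureExtractor

feature_extractor = ASTFeatureExtractor()
waveform = np.random.rand(16000).astype(np.float32)  # ~1 s of fake 16 kHz audio
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 1024, 128)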
328
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowercase__ : Optional[Any] = logging.getLogger(__name__) def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int: '''simple docstring''' def get_dataset(snake_case : Optional[int] ): __UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __UpperCamelCase = get_dataset(snake_case ) __UpperCamelCase = get_dataset(snake_case ) __UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) __UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any: '''simple docstring''' __UpperCamelCase = [] for epoch in range(snake_case ): # Train quickly model.train() for batch in dataloader: __UpperCamelCase , __UpperCamelCase = batch __UpperCamelCase = model(snake_case ) __UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case ) accelerator.backward(snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self )-> Tuple: '''simple docstring''' super().__init__() __UpperCamelCase = nn.Parameter(torch.randn(1 ) ) __UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' return x * self.a + self.b class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A__ ( self )-> Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() # Train baseline __UpperCamelCase = Accelerator() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) # Save initial __UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() __UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = Accelerator() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.load_state(SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save everything __UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) # Load everything back in and make sure all states work accelerator.load_state(SCREAMING_SNAKE_CASE_ ) test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() __UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) 
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = torch.tensor([1, 2, 3] ) __UpperCamelCase = torch.tensor([2, 3, 4] ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(net.parameters() ) __UpperCamelCase = Accelerator() with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve: accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() __UpperCamelCase = scheduler.state_dict() train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , 
scheduler.state_dict() ) def A__ ( self )-> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing" lowercase__ : List[Any] = DummyModel() lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3) lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowercase__ , lowercase__ : str = dummy_dataloaders() lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: lowercase__ : int = group["params"][0].device break assert param_device.type == accelerator.device.type lowercase__ : Union[str, Any] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: lowercase__ : Any = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: lowercase__ : List[Any] = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
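# Minimal save/restore round-trip with Accelerate, condensed from the
# checkpointing tests above; the checkpoint directory name is made up.
import torch
from accelerate import Accelerator

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

accelerator = Accelerator()
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("state_ckpt")  # writes model, optimizer, and RNG states
accelerator.load_state("state_ckpt")  # restores them all in place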
328
1
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowercase__ : str = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") lowercase__ : Union[str, Any] = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowercase__ : Optional[Any] = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowercase__ : Optional[int] = sorted(arg_to_scheduler.keys()) lowercase__ : Dict = "{" + ", ".join(arg_to_scheduler_choices) + "}" class SCREAMING_SNAKE_CASE__ ( pl.LightningModule ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="base" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , )-> List[Any]: '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = 0 __UpperCamelCase = Path(self.hparams.output_dir ) __UpperCamelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: __UpperCamelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) else: __UpperCamelCase = config __UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): assert hasattr(self.config , SCREAMING_SNAKE_CASE_ ), F"model config doesn't have a `{p}` attribute" setattr(self.config , SCREAMING_SNAKE_CASE_ , getattr(self.hparams , SCREAMING_SNAKE_CASE_ ) ) if tokenizer is None: __UpperCamelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=SCREAMING_SNAKE_CASE_ , ) else: __UpperCamelCase = tokenizer __UpperCamelCase = MODEL_MODES[mode] if model is None: __UpperCamelCase = 
self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=SCREAMING_SNAKE_CASE_ , ) else: __UpperCamelCase = model def A__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_type.from_pretrained(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = arg_to_scheduler[self.hparams.lr_scheduler] __UpperCamelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) __UpperCamelCase = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.model __UpperCamelCase = ['''bias''', '''LayerNorm.weight'''] __UpperCamelCase = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: __UpperCamelCase = Adafactor( SCREAMING_SNAKE_CASE_ , lr=self.hparams.learning_rate , scale_parameter=SCREAMING_SNAKE_CASE_ , relative_step=SCREAMING_SNAKE_CASE_ ) else: __UpperCamelCase = AdamW( SCREAMING_SNAKE_CASE_ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) __UpperCamelCase = optimizer __UpperCamelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' return self.validation_step(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' return self.validation_end(SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores __UpperCamelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A__ ( self , SCREAMING_SNAKE_CASE_ )-> int: '''simple docstring''' if stage == "test": __UpperCamelCase = len(self.test_dataloader().dataset ) else: __UpperCamelCase = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = len(self.train_dataloader().dataset ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False )-> Union[str, Any]: '''simple docstring''' raise NotImplementedError('''You must implement this for your task''' ) def A__ ( self )-> int: '''simple docstring''' return self.train_loader def A__ ( self )-> List[Any]: '''simple docstring''' return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Optional[Any]: '''simple docstring''' return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( SCREAMING_SNAKE_CASE_ , list(filter(SCREAMING_SNAKE_CASE_ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A__ ( self , 
SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' __UpperCamelCase = self.output_dir.joinpath('''best_tfmr''' ) __UpperCamelCase = self.step_count self.model.save_pretrained(SCREAMING_SNAKE_CASE_ ) self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ ) @staticmethod def A__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' parser.add_argument( '''--model_name_or_path''' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=SCREAMING_SNAKE_CASE_ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(SCREAMING_SNAKE_CASE_ ).parent / '''test_run''' / '''cache''' ) , type=SCREAMING_SNAKE_CASE_ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=SCREAMING_SNAKE_CASE_ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=SCREAMING_SNAKE_CASE_ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=SCREAMING_SNAKE_CASE_ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=SCREAMING_SNAKE_CASE_ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=SCREAMING_SNAKE_CASE_ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=SCREAMING_SNAKE_CASE_ , metavar=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=SCREAMING_SNAKE_CASE_ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=SCREAMING_SNAKE_CASE_ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=SCREAMING_SNAKE_CASE_ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=SCREAMING_SNAKE_CASE_ ) parser.add_argument('''--train_batch_size''' , default=32 , type=SCREAMING_SNAKE_CASE_ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=SCREAMING_SNAKE_CASE_ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class SCREAMING_SNAKE_CASE__ ( pl.Callback ): """simple docstring""" def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class SCREAMING_SNAKE_CASE__ ( pl.Callback ): """simple docstring""" def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(SCREAMING_SNAKE_CASE_ ) class SCREAMING_SNAKE_CASE__ ( pl.Callback ): """simple docstring""" def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = trainer.lr_schedulers[0]['''scheduler'''] __UpperCamelCase = {F"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' rank_zero_info('''***** Validation results *****''' ) __UpperCamelCase = trainer.callback_metrics # Log results for key in sorted(SCREAMING_SNAKE_CASE_ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(SCREAMING_SNAKE_CASE_ , str(metrics[key] ) ) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' rank_zero_info('''***** Test results *****''' ) __UpperCamelCase = trainer.callback_metrics # Log and save results to file __UpperCamelCase = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(SCREAMING_SNAKE_CASE_ , '''w''' ) as writer: for key in sorted(SCREAMING_SNAKE_CASE_ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(SCREAMING_SNAKE_CASE_ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(SCREAMING_SNAKE_CASE_ , str(metrics[key] ) ) ) def A_ ( snake_case : str , snake_case : Optional[Any] ) -> None: '''simple docstring''' parser.add_argument( '''--output_dir''' , default=str(Path(snake_case ).parent / '''test_run''' / '''model_checkpoints''' ) , type=snake_case , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=snake_case , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=snake_case ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=snake_case , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=snake_case , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=snake_case , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(snake_case ).parent / '''test_run''' / '''dummy-train-data''' ) , type=snake_case , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def A_ ( snake_case : BaseTransformer , snake_case : argparse.Namespace , snake_case : int=None , snake_case : List[Any]=True , snake_case : List[str]=[] , snake_case : int=None , snake_case : List[str]=None , **snake_case : List[Any] , ) -> Optional[int]: '''simple docstring''' pl.seed_everything(args.seed ) # init model __UpperCamelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=snake_case ) # add custom checkpoints if checkpoint_callback is None: __UpperCamelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(snake_case ) if logging_callback is None: __UpperCamelCase = LoggingCallback() __UpperCamelCase = {} if args.fpaa: __UpperCamelCase = 16 if args.gpus > 1: __UpperCamelCase = '''auto''' __UpperCamelCase = '''ddp''' __UpperCamelCase = args.accumulate_grad_batches __UpperCamelCase = None __UpperCamelCase = '''auto''' __UpperCamelCase = pl.Trainer.from_argparse_args( snake_case , weights_summary=snake_case , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=snake_case , val_check_interval=1 , num_sanity_val_steps=2 , **snake_case , ) if args.do_train: trainer.fit(snake_case ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
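# The weight-decay grouping used by configure_optimizers above, as a
# standalone sketch with an illustrative decay value.
import torch

model = torch.nn.Linear(4, 2)
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,  # biases and LayerNorm weights are never decayed
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5)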
328
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , )-> Optional[int]: '''simple docstring''' super().__init__(features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = Sql( cache_dir=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , sql=SCREAMING_SNAKE_CASE_ , con=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=SCREAMING_SNAKE_CASE_ , download_mode=SCREAMING_SNAKE_CASE_ , verification_mode=SCREAMING_SNAKE_CASE_ , base_path=SCREAMING_SNAKE_CASE_ , ) # Build dataset for splits __UpperCamelCase = self.builder.as_dataset( split='''train''' , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory ) return dataset class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> List[str]: '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F"num_proc {num_proc} must be an integer > 0." 
) __UpperCamelCase = dataset __UpperCamelCase = name __UpperCamelCase = con __UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __UpperCamelCase = num_proc __UpperCamelCase = to_sql_kwargs def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.to_sql_kwargs.pop('''con''' , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.to_sql_kwargs.pop('''index''' , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self._write(index=SCREAMING_SNAKE_CASE_ , **self.to_sql_kwargs ) return written def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args __UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __UpperCamelCase = query_table( table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __UpperCamelCase = batch.to_pandas() __UpperCamelCase = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return num_rows or len(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> int: '''simple docstring''' __UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
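# Hedged round-trip sketch of the SQL reader/writer pair above; the table and
# database names are made up, and sqlalchemy must be installed for URI strings.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_sql("demo_table", "sqlite:///demo.db")  # SqlDatasetWriter path
restored = Dataset.from_sql("demo_table", "sqlite:///demo.db")  # SqlDatasetReader path
print(restored.column_names)  # ['text', 'label']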
328
1
from . import (
    albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart,
    barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird,
    bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom,
    bridgetower, byt5, camembert, canine, chinese_clip, clap, clip, clipseg, codegen,
    conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, data2vec,
    deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, deta,
    detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer,
    efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon,
    flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpt2, gpt_bigcode,
    gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, gptsan_japanese, graphormer,
    groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox,
    layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, longformer,
    longt5, luke, lxmert, m2m_100, marian, markuplm, mask2former, maskformer, mbart,
    mbart50, mega, megatron_bert, megatron_gpt2, mgp_str, mluke, mobilebert,
    mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, mpnet, mra, mt5, musicgen, mvp,
    nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt,
    owlvit, pegasus, pegasus_x, perceiver, phobert, pix2struct, plbart, poolformer,
    prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta,
    roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d,
    speech_encoder_decoder, speech_to_text, speech_to_text_2, speecht5, splinter,
    squeezebert, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5,
    table_transformer, tapas, time_series_transformer, timesformer, timm_backbone,
    transfo_xl, trocr, tvlt, umt5, unispeech, unispeech_sat, upernet, videomae, vilt,
    vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid,
    vit_mae, vit_msn, vivit, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme,
    wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta,
    xlm_roberta_xl, xlnet, xmod, yolos, yoso,
)
328
def excel_title_to_column(column_title: str) -> int:
    """Return the Excel column number for a column title, e.g. "AB" -> 28."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
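# A few spot-checks, assuming the excel_title_to_column helper defined above:
# "A" -> 1, "Z" -> 26, "AA" -> 27, "AZ" -> 52.
for title in ("A", "Z", "AA", "AZ"):
    print(title, excel_title_to_column(title))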
328
1
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' __UpperCamelCase = 3 __UpperCamelCase = 250 __UpperCamelCase = ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE_ , dtype=torch.float ) / length return input_ids, scores def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self._get_tensors(5 ) __UpperCamelCase = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = MaxLengthCriteria(max_length=10 ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase , __UpperCamelCase = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self._get_tensors(5 ) __UpperCamelCase = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(SCREAMING_SNAKE_CASE_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __UpperCamelCase = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1 )
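# Standalone sketch of how the generation stopping criteria tested above are
# evaluated: each criterion receives (input_ids, scores) and the list returns
# a single bool indicating whether generation should stop.
import torch
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
input_ids = torch.zeros((3, 10), dtype=torch.long)  # batch of 3, sequence length 10
scores = torch.zeros((3, 10))
print(criteria(input_ids, scores))  # True: max_length has been reached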
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
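For reference, the first five rows both generators should produce:

print(generate_pascal_triangle(5))
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
print(generate_pascal_triangle_optimized(5) == generate_pascal_triangle(5))  # True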
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType lowercase__ : Optional[List[str]] = None lowercase__ : List[str] = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image lowercase__ : Union[str, Any] = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" _snake_case = True _snake_case = None # Automatically constructed _snake_case = "PIL.Image.Image" _snake_case = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) _snake_case = field(default='Image' , init=SCREAMING_SNAKE_CASE_ , repr=SCREAMING_SNAKE_CASE_ ) def __call__( self )-> List[str]: '''simple docstring''' return self.pa_type def A__ ( self , SCREAMING_SNAKE_CASE_ )-> dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = np.array(SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return {"path": value, "bytes": None} elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return {"path": None, "bytes": value} elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(SCREAMING_SNAKE_CASE_ ) elif isinstance(SCREAMING_SNAKE_CASE_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(SCREAMING_SNAKE_CASE_ ) elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( F"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> "PIL.Image.Image": '''simple docstring''' if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. 
Please use Image(decode=True) instead.''' ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support decoding images, please install \'Pillow\'.''' ) if token_per_repo_id is None: __UpperCamelCase = {} __UpperCamelCase , __UpperCamelCase = value['''path'''], value['''bytes'''] if bytes_ is None: if path is None: raise ValueError(F"An image should have one of 'path' or 'bytes' but both are None in {value}." ) else: if is_local_path(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = PIL.Image.open(SCREAMING_SNAKE_CASE_ ) else: __UpperCamelCase = path.split('''::''' )[-1] try: __UpperCamelCase = string_to_dict(SCREAMING_SNAKE_CASE_ , config.HUB_DATASETS_URL )['''repo_id'''] __UpperCamelCase = token_per_repo_id.get(SCREAMING_SNAKE_CASE_ ) except ValueError: __UpperCamelCase = None with xopen(SCREAMING_SNAKE_CASE_ , '''rb''' , use_auth_token=SCREAMING_SNAKE_CASE_ ) as f: __UpperCamelCase = BytesIO(f.read() ) __UpperCamelCase = PIL.Image.open(bytes_ ) else: __UpperCamelCase = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def A__ ( self )-> Union["FeatureType", Dict[str, "FeatureType"]]: '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value('''binary''' ), "path": Value('''string''' ), } ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.StructArray: '''simple docstring''' if pa.types.is_string(storage.type ): __UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() ) __UpperCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): __UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() ) __UpperCamelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: __UpperCamelCase = storage.field('''bytes''' ) else: __UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: __UpperCamelCase = storage.field('''path''' ) else: __UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() ) __UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): __UpperCamelCase = pa.array( [encode_np_array(np.array(SCREAMING_SNAKE_CASE_ ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) __UpperCamelCase = pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=pa.string() ) __UpperCamelCase = pa.StructArray.from_arrays( [bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.StructArray: '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(SCREAMING_SNAKE_CASE_ ): with xopen(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f: __UpperCamelCase = f.read() return bytes_ __UpperCamelCase = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) __UpperCamelCase = pa.array( [os.path.basename(SCREAMING_SNAKE_CASE_ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) 
__UpperCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(SCREAMING_SNAKE_CASE_ , self.pa_type ) def A_ ( ) -> List[str]: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() __UpperCamelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def A_ ( snake_case : "PIL.Image.Image" ) -> bytes: '''simple docstring''' __UpperCamelCase = BytesIO() if image.format in list_image_compression_formats(): __UpperCamelCase = image.format else: __UpperCamelCase = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF''' image.save(snake_case , format=snake_case ) return buffer.getvalue() def A_ ( snake_case : "PIL.Image.Image" ) -> dict: '''simple docstring''' if hasattr(snake_case , '''filename''' ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(snake_case )} def A_ ( snake_case : np.ndarray ) -> dict: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) __UpperCamelCase = array.dtype __UpperCamelCase = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER __UpperCamelCase = dtype.kind __UpperCamelCase = dtype.itemsize __UpperCamelCase = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: __UpperCamelCase = np.dtype('''|u1''' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." ) if dtype is not dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: __UpperCamelCase = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: __UpperCamelCase = dtype_byteorder + dtype_kind + str(snake_case ) __UpperCamelCase = np.dtype(snake_case ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) __UpperCamelCase = PIL.Image.fromarray(array.astype(snake_case ) ) return {"path": None, "bytes": image_to_bytes(snake_case )} def A_ ( snake_case : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]: '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('''To support encoding images, please install \'Pillow\'.''' ) if objs: __UpperCamelCase , __UpperCamelCase = first_non_null_value(snake_case ) if isinstance(snake_case , snake_case ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(snake_case , np.ndarray ): __UpperCamelCase = no_op_if_value_is_null(snake_case ) return [obj_to_image_dict_func(snake_case ) for obj in objs] elif isinstance(snake_case , PIL.Image.Image ): __UpperCamelCase = no_op_if_value_is_null(snake_case ) return [obj_to_image_dict_func(snake_case ) for obj in objs] else: return objs else: return objs
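To make the downcasting branch above concrete, here is a minimal standalone sketch of the same loop; the two-entry allow_list is a stand-in for the module's _VALID_IMAGE_ARRAY_DTPYES, and the dtype choices are illustrative:

import numpy as np

arr = np.arange(6, dtype=">i8").reshape(2, 3)  # big-endian int64: not a Pillow-compatible dtype
allow_list = [np.dtype(">i4"), np.dtype("<i4")]  # stand-in for _VALID_IMAGE_ARRAY_DTPYES
itemsize = arr.dtype.itemsize
dest_dtype = None
while itemsize >= 1:
    candidate = np.dtype(">" + arr.dtype.kind + str(itemsize))  # same byteorder and kind, smaller width
    if candidate in allow_list:
        dest_dtype = candidate
        break
    itemsize //= 2
print(dest_dtype)  # >i4: the widest allowed dtype of the same kind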
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
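Hypothetical invocation (the script filename is an assumption; only --dump_path is required, and --txt2img_unclip defaults to kakaobrain/karlo-v1-alpha):

# python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation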
from typing import Any


class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the payloads rather than relinking the nodes
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowercase__ : Union[str, Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" lowercase__ : Optional[Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" lowercase__ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" lowercase__ : Optional[int] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" lowercase__ : Optional[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def A__ ( self )-> Tuple: '''simple docstring''' return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[1, 10, 100] , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=3.0 )-> Union[str, Any]: '''simple docstring''' if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE_ ) as executor: __UpperCamelCase = [] __UpperCamelCase = Counter() __UpperCamelCase = 0 __UpperCamelCase = defaultdict(SCREAMING_SNAKE_CASE_ ) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ): for candidate in candidates: __UpperCamelCase = candidate + '''\n''' + test_case __UpperCamelCase = (test_program, timeout, task_id, completion_id[task_id]) __UpperCamelCase = executor.submit(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) futures.append(SCREAMING_SNAKE_CASE_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) __UpperCamelCase , __UpperCamelCase = [], [] for result in results.values(): result.sort() __UpperCamelCase = [r[1]['''passed'''] for r in result] total.append(len(SCREAMING_SNAKE_CASE_ ) ) correct.append(sum(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = np.array(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = np.array(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = k __UpperCamelCase = {F"pass@{k}": estimate_pass_at_k(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def A_ ( snake_case : Tuple , snake_case : Union[str, Any] , snake_case : List[Any] ) -> Optional[Any]: '''simple docstring''' def estimator(snake_case : int , snake_case : int , snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(snake_case , snake_case ): __UpperCamelCase = itertools.repeat(snake_case , len(snake_case ) ) else: assert len(snake_case ) == len(snake_case ) __UpperCamelCase = iter(snake_case ) return np.array([estimator(int(snake_case ) , int(snake_case ) , snake_case ) for n, c in zip(snake_case , snake_case )] )
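A hedged, standalone check of the unbiased pass@k estimator used above (re-implemented here for illustration): with n=5 samples and c=2 correct, pass@1 must equal c/n = 0.4.

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # 1 - C(n-c, k) / C(n, k), computed as a stable product
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

assert abs(pass_at_k(5, 2, 1) - 0.4) < 1e-9
print(pass_at_k(5, 2, 2))  # 0.7: probability that at least one of 2 draws is correct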
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
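Example run (names reconstructed above):

print(binary_insertion_sort([5, 2, 4, 6, 1, 3]))  # [1, 2, 3, 4, 5, 6]
print(binary_insertion_sort([]))  # []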
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) lowercase__ : Optional[Any] = ["names", "prefix"] lowercase__ : List[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] lowercase__ : Optional[Any] = ["encoding_errors", "on_bad_lines"] lowercase__ : List[str] = ["date_format"] @dataclass class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ): """simple docstring""" _snake_case = "," _snake_case = None _snake_case = "infer" _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = False _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = True _snake_case = False _snake_case = True _snake_case = None _snake_case = "." _snake_case = None _snake_case = '"' _snake_case = 0 _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = True _snake_case = 0 _snake_case = True _snake_case = False _snake_case = None _snake_case = 10000 _snake_case = None _snake_case = "strict" _snake_case = "error" _snake_case = None def A__ ( self )-> Any: '''simple docstring''' if self.delimiter is not None: __UpperCamelCase = self.delimiter if self.column_names is not None: __UpperCamelCase = self.column_names @property def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not 
(datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ): """simple docstring""" _snake_case = CsvConfig def A__ ( self )-> Any: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) __UpperCamelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ): __UpperCamelCase = data_files if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = [files] __UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __UpperCamelCase = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = [files] __UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) ) return splits def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.Table: '''simple docstring''' if self.config.features is not None: __UpperCamelCase = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE_ ) for feature in self.config.features.values() ): # cheaper cast __UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __UpperCamelCase = table_cast(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return pa_table def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __UpperCamelCase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE_ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ): __UpperCamelCase = pd.read_csv(SCREAMING_SNAKE_CASE_ , iterator=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = pa.Table.from_pandas(SCREAMING_SNAKE_CASE_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ ) except ValueError as e: logger.error(F"Failed to read file '{file}' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}" ) raise
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split a string into sentences separated by newlines (used for ROUGE-Lsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char, keeping the substituted result
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
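Hypothetical usage, assuming the punkt model downloaded above is available:

print(add_newline_to_end_of_each_sentence("Hello there. General Kenobi."))
# Expected: "Hello there.\nGeneral Kenobi."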
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as prime + 2*i*i."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")
    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
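A hedged spot check: this file computes the answer to Project Euler problem 46 (Goldbach's other conjecture), whose published answer is 5777.

assert solution() == 5777  # smallest odd composite not expressible as prime + 2*i*i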
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = 10 def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = [1, 2, 3, 4] __UpperCamelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] __UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] __UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(SCREAMING_SNAKE_CASE_ , self.block_size , 0 ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = '''It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.''' __UpperCamelCase , __UpperCamelCase = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = '''''' __UpperCamelCase , __UpperCamelCase = process_story(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) self.assertEqual(SCREAMING_SNAKE_CASE_ , [] ) def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = ( '''It was the year of Our Lord one thousand seven hundred and ''' '''seventy-five\n\nSpiritual revelations were conceded to England ''' '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times''' ) __UpperCamelCase , __UpperCamelCase = process_story(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = [ '''It was the year of Our Lord one thousand seven hundred and seventy-five.''', '''Spiritual revelations were conceded to England at that favoured period, as at this.''', ] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ['''It was the best of times.'''] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = torch.tensor([1, 2, 3, 4] ) __UpperCamelCase = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 0 ).numpy() , expected.numpy() ) def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) __UpperCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 23 ).numpy() , expected.numpy() ) def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) __UpperCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(SCREAMING_SNAKE_CASE_ , 1 ).numpy() , expected.numpy() ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = 101 __UpperCamelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) __UpperCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) __UpperCamelCase = compute_token_type_ids(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) 
np.testing.assert_array_equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area between fnc and the x-axis on [x_start, x_end]."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
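Worked check: f(x) = x^3 + x^2 crosses the axis at x = -1 and x = 0, so the exact area between the curve and the x-axis on [-5, 5] is 938/3 (about 312.67). Because abs() is applied per trapezoid, the estimate converges to this unsigned area rather than to the signed integral 250/3.

approx = trapezoidal_area(lambda x: x**3 + x**2, -5, 5, 100_000)
print(approx)  # about 312.67 (= 938/3)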
import json
from typing import Iterator, List, Union

from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing


class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
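A hypothetical end-to-end use of the class above (the class name is a reconstruction; training here uses two toy sentences and a tiny vocabulary, so the exact sub-word pieces depend on the Unigram training run):

tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(iter(["hello world", "hello there world"]), vocab_size=30)
print(tokenizer.encode("hello world").tokens)  # sub-word pieces, ending with "</s>"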
import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowercase__ : int = logging.get_logger(__name__) lowercase__ : List[str] = ["model.decoder.embed_positions.weights"] def A_ ( snake_case : Any ) -> List[Any]: '''simple docstring''' if "emb" in name: __UpperCamelCase = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: __UpperCamelCase = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: __UpperCamelCase = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: __UpperCamelCase = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: __UpperCamelCase = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: __UpperCamelCase = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: __UpperCamelCase = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: __UpperCamelCase = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: __UpperCamelCase = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: __UpperCamelCase = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: __UpperCamelCase = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def A_ ( snake_case : OrderedDict , snake_case : int ) -> Tuple[Dict, Dict]: '''simple docstring''' __UpperCamelCase = list(state_dict.keys() ) __UpperCamelCase = {} for key in keys: __UpperCamelCase = state_dict.pop(snake_case ) __UpperCamelCase = rename_keys(snake_case ) if "in_proj_weight" in key: # split fused qkv proj __UpperCamelCase = val[:hidden_size, :] __UpperCamelCase = val[hidden_size : 2 * hidden_size, :] __UpperCamelCase = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: __UpperCamelCase = val else: __UpperCamelCase = val return state_dict, enc_dec_proj_state_dict def A_ ( snake_case : str ) -> MusicgenDecoderConfig: '''simple docstring''' if checkpoint == "small": # default config values __UpperCamelCase = 1024 __UpperCamelCase = 24 __UpperCamelCase = 16 elif checkpoint == "medium": __UpperCamelCase = 1536 __UpperCamelCase = 48 __UpperCamelCase = 24 elif checkpoint == "large": __UpperCamelCase = 2048 __UpperCamelCase = 48 __UpperCamelCase = 32 else: raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}." 
) __UpperCamelCase = MusicgenDecoderConfig( hidden_size=snake_case , ffn_dim=hidden_size * 4 , num_hidden_layers=snake_case , num_attention_heads=snake_case , ) return config @torch.no_grad() def A_ ( snake_case : Any , snake_case : str=None , snake_case : Any=None , snake_case : Union[str, Any]="cpu" ) -> List[Any]: '''simple docstring''' __UpperCamelCase = MusicGen.get_pretrained(snake_case , device=snake_case ) __UpperCamelCase = decoder_config_from_checkpoint(snake_case ) __UpperCamelCase = fairseq_model.lm.state_dict() __UpperCamelCase , __UpperCamelCase = rename_state_dict( snake_case , hidden_size=decoder_config.hidden_size ) __UpperCamelCase = TaEncoderModel.from_pretrained('''t5-base''' ) __UpperCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) __UpperCamelCase = MusicgenForCausalLM(snake_case ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection __UpperCamelCase , __UpperCamelCase = decoder.load_state_dict(snake_case , strict=snake_case ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(snake_case ) if len(snake_case ) > 0: raise ValueError(f"Missing key(s) in state_dict: {missing_keys}" ) if len(snake_case ) > 0: raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}" ) # init the composite model __UpperCamelCase = MusicgenForConditionalGeneration(text_encoder=snake_case , audio_encoder=snake_case , decoder=snake_case ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(snake_case ) # check we can do a forward pass __UpperCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) __UpperCamelCase = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): __UpperCamelCase = model(input_ids=snake_case , decoder_input_ids=snake_case ).logits if logits.shape != (8, 1, 2048): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor __UpperCamelCase = AutoTokenizer.from_pretrained('''t5-base''' ) __UpperCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) __UpperCamelCase = MusicgenProcessor(feature_extractor=snake_case , tokenizer=snake_case ) # set the appropriate bos/pad token ids __UpperCamelCase = 2048 __UpperCamelCase = 2048 # set other default generation config params __UpperCamelCase = int(30 * audio_encoder.config.frame_rate ) __UpperCamelCase = True __UpperCamelCase = 3.0 if pytorch_dump_folder is not None: Path(snake_case ).mkdir(exist_ok=snake_case ) logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}" ) model.save_pretrained(snake_case ) processor.save_pretrained(snake_case ) if repo_id: logger.info(f"Pushing model {checkpoint} to {repo_id}" ) model.push_to_hub(snake_case ) processor.push_to_hub(snake_case ) if __name__ == "__main__": lowercase__ : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint", default="small", type=str, help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.", ) parser.add_argument( "--pytorch_dump_folder", required=True, default=None, type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub." 
) parser.add_argument( "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda." ) lowercase__ : Tuple = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> List[str]: '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self )-> Dict: '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=SCREAMING_SNAKE_CASE_ , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = FalconModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )-> str: '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = FalconModel(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )-> List[Any]: '''simple docstring''' __UpperCamelCase = FalconForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )-> str: '''simple docstring''' __UpperCamelCase = True __UpperCamelCase = True __UpperCamelCase = FalconForCausalLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # first forward pass __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCamelCase = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['''hidden_states'''][0] __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , encoder_attention_mask=SCREAMING_SNAKE_CASE_ , 
past_key_values=SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , )['''hidden_states'''][0] # select random slice __UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCamelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) ) def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ( ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ( __UpperCamelCase ) , ) = config_and_inputs __UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _snake_case = (FalconForCausalLM,) if is_torch_available() else () _snake_case = ( { 'feature-extraction': FalconModel, 'text-classification': FalconForSequenceClassification, 'text-generation': FalconForCausalLM, 'question-answering': FalconForQuestionAnswering, 'token-classification': FalconForTokenClassification, 'zero-shot': FalconForSequenceClassification, } if is_torch_available() else {} ) _snake_case = False _snake_case = False def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = FalconModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def A__ ( self )-> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase , *__UpperCamelCase = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __UpperCamelCase = alibi self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 3 __UpperCamelCase = input_dict['''input_ids'''] __UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase = FalconForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 3 __UpperCamelCase = '''single_label_classification''' __UpperCamelCase = input_dict['''input_ids'''] __UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) 
__UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCamelCase = FalconForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = input_dict['''input_ids'''] __UpperCamelCase = FalconForCausalLM(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = input_ids.shape[0] __UpperCamelCase = model._convert_to_rw_cache(result.past_key_values ) __UpperCamelCase = model._convert_cache_to_standard_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for layer in range(len(SCREAMING_SNAKE_CASE_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = 3 __UpperCamelCase = '''multi_label_classification''' __UpperCamelCase = input_dict['''input_ids'''] __UpperCamelCase = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCamelCase = FalconForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' for model_class in self.all_generative_model_classes: __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(SCREAMING_SNAKE_CASE_ , '''use_cache''' ): return __UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) if "use_cache" not in inputs: __UpperCamelCase = True __UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __UpperCamelCase = ( getattr(SCREAMING_SNAKE_CASE_ , '''decoder_layers''' , SCREAMING_SNAKE_CASE_ ) or getattr(SCREAMING_SNAKE_CASE_ , '''num_decoder_layers''' , SCREAMING_SNAKE_CASE_ ) or config.num_hidden_layers ) __UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , '''num_kv_heads''' , config.num_attention_heads ) __UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , '''d_model''' , config.hidden_size ) __UpperCamelCase = embed_dim // num_attention_heads __UpperCamelCase = outputs['''past_key_values'''] self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase = inputs['''input_ids'''].shape for i in range(SCREAMING_SNAKE_CASE_ ): if config.new_decoder_architecture: __UpperCamelCase = config.num_attention_heads elif config.multi_query: __UpperCamelCase = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) __UpperCamelCase = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) model.eval() model.to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ( '''My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.''' ) __UpperCamelCase = model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , max_new_tokens=19 ) __UpperCamelCase = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )[0] self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def A__ ( self )-> int: '''simple docstring''' for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = FalconForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) model.eval() model.to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , max_new_tokens=4 ) model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , max_new_tokens=4 ) model.generate(**SCREAMING_SNAKE_CASE_ , num_beams=2 , max_new_tokens=4 ) @slow def A__ ( self )-> str: '''simple docstring''' with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = FalconForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) model.eval() model.to(device=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # Test results are the same with and without cache __UpperCamelCase = model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , 
max_new_tokens=20 , use_cache=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model.generate(**SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , max_new_tokens=20 , use_cache=SCREAMING_SNAKE_CASE_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
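# Hedged standalone sketch (not part of the test class above): the same
# with/without-cache equivalence check for greedy decoding, run outside the
# suite. The tiny checkpoint name is taken from the tests above; any Falcon
# checkpoint should behave the same way.
import torch
from transformers import AutoTokenizer, FalconForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b").eval()
inputs = tokenizer("My favorite food is", return_tensors="pt")
with torch.no_grad():
    no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
    with_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
assert torch.equal(no_cache, with_cache)  # greedy outputs must match token for token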
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase__ : List[str] = 1_6 lowercase__ : str = 3_2 def A_ ( snake_case : Accelerator , snake_case : int = 16 ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __UpperCamelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCamelCase = datasets.map( snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case : str ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCamelCase = 16 elif accelerator.mixed_precision != "no": __UpperCamelCase = 8 else: __UpperCamelCase = None return tokenizer.pad( snake_case , padding='''longest''' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__UpperCamelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) __UpperCamelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowercase__ : Union[str, Any] = mocked_dataloaders # noqa: F811 def A_ ( snake_case : List[str] , snake_case : List[Any] ) -> Tuple: '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case ) == "1": __UpperCamelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: __UpperCamelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: __UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['''lr'''] __UpperCamelCase = int(config['''num_epochs'''] ) __UpperCamelCase = int(config['''seed'''] ) __UpperCamelCase = int(config['''batch_size'''] ) set_seed(snake_case ) __UpperCamelCase , __UpperCamelCase = get_dataloaders(snake_case , snake_case ) __UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation __UpperCamelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE __UpperCamelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer __UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case ) # Instantiate scheduler __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( snake_case , snake_case , snake_case , snake_case , snake_case ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: __UpperCamelCase = os.path.split(snake_case )[-1].split('''.''' )[0] accelerator.init_trackers(snake_case , snake_case ) # Now we train the model for epoch in range(snake_case ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: __UpperCamelCase = 0 for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) __UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case , references=snake_case , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , snake_case ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(snake_case ), '''epoch''': epoch, } , step=snake_case , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def A_ ( ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=snake_case , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(snake_case , snake_case ) if __name__ == "__main__": main()
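# Hedged minimal sketch of the experiment-tracking API exercised above,
# stripped of the training loop; the project name and logged values are
# placeholders, the calls (`init_trackers`, `log`, `end_training`) are the
# ones used in the script.
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("tracking_example", config={"lr": 2e-5, "num_epochs": 3})
for epoch in range(3):
    accelerator.log({"train_loss": 0.0, "epoch": epoch}, step=epoch)  # dummy values
accelerator.end_training()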
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Union[str, Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = 1 __UpperCamelCase = 3 __UpperCamelCase = (32, 32) __UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) return image @property def A__ ( self )-> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def A__ ( self )-> List[Any]: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def A__ ( self )-> int: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE_ ) @property def A__ ( self )-> Optional[int]: '''simple docstring''' def extract(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ): class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self )-> str: '''simple docstring''' __UpperCamelCase = torch.ones([0] ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' self.pixel_values.to(SCREAMING_SNAKE_CASE_ ) return self return Out() return extract def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = '''A painting of a squirrel eating a 
burger''' __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=SCREAMING_SNAKE_CASE_ , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = '''A painting of a squirrel eating a burger''' __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __UpperCamelCase = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) __UpperCamelCase = output.images __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=SCREAMING_SNAKE_CASE_ , )[0] __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCamelCase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=SCREAMING_SNAKE_CASE_ ) assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE_ ) assert pipe.safety_checker is None __UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ ) # 
sanity check that the pipeline still works assert pipe.safety_checker is None __UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.dummy_cond_unet __UpperCamelCase = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.dummy_vae __UpperCamelCase = self.dummy_text_encoder __UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 __UpperCamelCase = unet.half() __UpperCamelCase = vae.half() __UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk __UpperCamelCase = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , feature_extractor=self.dummy_extractor , ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = '''A painting of a squirrel eating a burger''' __UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Any: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) __UpperCamelCase = 4003660346 __UpperCamelCase = 7 # without safety guidance (sld_guidance_scale = 0) __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.2_3_8_3, 0.2_2_7_6, 
0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = '''padme amidala taking a bath artwork, safe for work, no nudity''' __UpperCamelCase = 2734971755 __UpperCamelCase = 7 __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) __UpperCamelCase = sd_pipe.to(SCREAMING_SNAKE_CASE_ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.''' ''' leyendecker''' ) __UpperCamelCase = 1044355234 __UpperCamelCase = 12 __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) __UpperCamelCase = output.images __UpperCamelCase = image[0, -3:, -3:, -1] __UpperCamelCase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
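# Hedged usage sketch of the safe pipeline tested above. The SLD parameter
# values mirror the "strong" configuration from the nightly tests; the prompt
# and output path are placeholders.
import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
generator = torch.manual_seed(0)
image = pipe(
    "portrait photograph, detailed",  # placeholder prompt
    generator=generator,
    num_inference_steps=50,
    guidance_scale=7,
    sld_guidance_scale=2000,
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]
image.save("safe_output.png")  # placeholder path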
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType lowercase__ : str = logging.get_logger(__name__) lowercase__ : Union[str, Any] = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off lowercase__ : str = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] lowercase__ : str = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'whisper' _snake_case = ['past_key_values'] _snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , SCREAMING_SNAKE_CASE_=51865 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=50257 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1500 , SCREAMING_SNAKE_CASE_=448 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[220, 50256] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = num_mel_bins 
__UpperCamelCase = d_model __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = use_cache __UpperCamelCase = encoder_layers __UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCamelCase = max_source_positions __UpperCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __UpperCamelCase = classifier_proj_size __UpperCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCamelCase = apply_spec_augment __UpperCamelCase = mask_time_prob __UpperCamelCase = mask_time_length __UpperCamelCase = mask_time_min_masks __UpperCamelCase = mask_feature_prob __UpperCamelCase = mask_feature_length __UpperCamelCase = mask_feature_min_masks __UpperCamelCase = median_filter_width super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def A__ ( self )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' __UpperCamelCase = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: __UpperCamelCase = {0: '''batch'''} else: __UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' ) return common_inputs def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 22050 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 220 , )-> Mapping[str, Any]: '''simple docstring''' __UpperCamelCase = OrderedDict() __UpperCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = encoder_inputs['''input_features'''].shape[2] __UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __UpperCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = encoder_inputs.pop('''input_features''' ) __UpperCamelCase = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: __UpperCamelCase = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def A__ ( self )-> float: '''simple docstring''' return 1E-3
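# Hedged sketch: instantiating the config class defined above. The overrides
# are arbitrary small values; unset fields keep the signature defaults
# (e.g. max_source_positions=1500, max_target_positions=448).
from transformers import WhisperConfig

config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
print(config.max_source_positions, config.max_target_positions)  # 1500 448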
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def A_ ( snake_case : str , snake_case : str , snake_case : str ) -> List[str]: '''simple docstring''' def get_masked_lm_array(snake_case : str ): __UpperCamelCase = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE" __UpperCamelCase = tf.train.load_variable(snake_case , snake_case ) if "kernel" in name: __UpperCamelCase = array.transpose() return torch.from_numpy(snake_case ) def get_encoder_array(snake_case : str ): __UpperCamelCase = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE" __UpperCamelCase = tf.train.load_variable(snake_case , snake_case ) if "kernel" in name: __UpperCamelCase = array.transpose() return torch.from_numpy(snake_case ) def get_encoder_layer_array(snake_case : int , snake_case : str ): __UpperCamelCase = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE" __UpperCamelCase = tf.train.load_variable(snake_case , snake_case ) if "kernel" in name: __UpperCamelCase = array.transpose() return torch.from_numpy(snake_case ) def get_encoder_attention_layer_array(snake_case : int , snake_case : str , snake_case : Optional[Any] ): __UpperCamelCase = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE" __UpperCamelCase = tf.train.load_variable(snake_case , snake_case ) __UpperCamelCase = array.reshape(snake_case ) if "kernel" in name: __UpperCamelCase = array.transpose() return torch.from_numpy(snake_case ) print(f"Loading model based on config from {config_path}..." 
) __UpperCamelCase = BertConfig.from_json_file(snake_case ) __UpperCamelCase = BertForMaskedLM(snake_case ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __UpperCamelCase = model.bert.encoder.layer[layer_index] # Self-attention __UpperCamelCase = layer.attention.self __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_query_dense/kernel''' , self_attn.query.weight.data.shape ) __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_query_dense/bias''' , self_attn.query.bias.data.shape ) __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_key_dense/kernel''' , self_attn.key.weight.data.shape ) __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_key_dense/bias''' , self_attn.key.bias.data.shape ) __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_value_dense/kernel''' , self_attn.value.weight.data.shape ) __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_value_dense/bias''' , self_attn.value.bias.data.shape ) # Self-attention Output __UpperCamelCase = layer.attention.output __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_output_dense/kernel''' , self_output.dense.weight.data.shape ) __UpperCamelCase = get_encoder_attention_layer_array( snake_case , '''_output_dense/bias''' , self_output.dense.bias.data.shape ) __UpperCamelCase = get_encoder_layer_array(snake_case , '''_attention_layer_norm/gamma''' ) __UpperCamelCase = get_encoder_layer_array(snake_case , '''_attention_layer_norm/beta''' ) # Intermediate __UpperCamelCase = layer.intermediate __UpperCamelCase = get_encoder_layer_array(snake_case , '''_intermediate_dense/kernel''' ) __UpperCamelCase = get_encoder_layer_array(snake_case , '''_intermediate_dense/bias''' ) # Output __UpperCamelCase = layer.output __UpperCamelCase = get_encoder_layer_array(snake_case , '''_output_dense/kernel''' ) __UpperCamelCase = get_encoder_layer_array(snake_case , '''_output_dense/bias''' ) __UpperCamelCase = get_encoder_layer_array(snake_case , '''_output_layer_norm/gamma''' ) __UpperCamelCase = get_encoder_layer_array(snake_case , '''_output_layer_norm/beta''' ) # Embeddings __UpperCamelCase = get_encoder_array('''_position_embedding_layer/embeddings''' ) __UpperCamelCase = get_encoder_array('''_type_embedding_layer/embeddings''' ) __UpperCamelCase = get_encoder_array('''_embedding_norm_layer/gamma''' ) __UpperCamelCase = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head __UpperCamelCase = model.cls.predictions.transform __UpperCamelCase = get_masked_lm_array('''dense/kernel''' ) __UpperCamelCase = get_masked_lm_array('''dense/bias''' ) __UpperCamelCase = get_masked_lm_array('''layer_norm/gamma''' ) __UpperCamelCase = get_masked_lm_array('''layer_norm/beta''' ) __UpperCamelCase = get_masked_lm_array('''embedding_table''' ) # Pooling __UpperCamelCase = BertPooler(config=snake_case ) __UpperCamelCase = get_encoder_array('''_pooler_layer/kernel''' ) __UpperCamelCase = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(snake_case ) # Integration test - should load without any errors ;) __UpperCamelCase = BertForMaskedLM.from_pretrained(snake_case ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": lowercase__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." 
) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) args = parser.parse_args() A_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
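# Hedged usage sketch for the converter above (named `A_` in this dump); all
# three paths are placeholders for a real TF Token Dropping checkpoint, a BERT
# config JSON, and the output directory.
A_("/path/to/tf_checkpoint", "/path/to/bert_config.json", "/path/to/pytorch_dump")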
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : Any = logging.get_logger(__name__) lowercase__ : Tuple = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'xlnet' _snake_case = ['mems'] _snake_case = { 'n_token': 'vocab_size', # Backward compatibility 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="bi" , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="tanh" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , )-> List[str]: '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = d_model __UpperCamelCase = n_layer __UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" ) __UpperCamelCase = d_model // n_head __UpperCamelCase = ff_activation __UpperCamelCase = d_inner __UpperCamelCase = untie_r __UpperCamelCase = attn_type __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = dropout __UpperCamelCase = mem_len __UpperCamelCase = reuse_len __UpperCamelCase = bi_data __UpperCamelCase = clamp_len __UpperCamelCase = same_length __UpperCamelCase = summary_type __UpperCamelCase = summary_use_proj __UpperCamelCase = summary_activation __UpperCamelCase = summary_last_dropout __UpperCamelCase = start_n_top __UpperCamelCase = end_n_top __UpperCamelCase = bos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = kwargs['''use_cache'''] __UpperCamelCase = use_mems_eval __UpperCamelCase = use_mems_train super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def A__ ( self )-> Optional[Any]: '''simple docstring''' logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." ) return -1 @max_position_embeddings.setter def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' raise NotImplementedError( F"The model {self.model_type} is one of the few models that has no sequence length limit." )
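# Hedged sketch of the head-divisibility check in the constructor above:
# d_head is derived as d_model // n_head, and a non-divisible pair raises.
from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)
assert config.d_head == 64
try:
    XLNetConfig(d_model=1024, n_head=10)  # 1024 % 10 != 0 -> ValueError
except ValueError as err:
    print(err)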
from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean lowercase__ : int = 0 lowercase__ : Dict = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase__ : int = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right lowercase__ : Dict = tuple[int, int] class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )-> None: '''simple docstring''' __UpperCamelCase = pos_x __UpperCamelCase = pos_y __UpperCamelCase = (pos_y, pos_x) __UpperCamelCase = goal_x __UpperCamelCase = goal_y __UpperCamelCase = g_cost __UpperCamelCase = parent __UpperCamelCase = self.calculate_heuristic() __UpperCamelCase = self.g_cost + self.h_cost def A__ ( self )-> float: '''simple docstring''' __UpperCamelCase = self.pos_x - self.goal_x __UpperCamelCase = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(SCREAMING_SNAKE_CASE_ ) + abs(SCREAMING_SNAKE_CASE_ ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , SCREAMING_SNAKE_CASE_ )-> bool: '''simple docstring''' return self.f_cost < other.f_cost class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> List[Any]: '''simple docstring''' __UpperCamelCase = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = [self.start] __UpperCamelCase = [] __UpperCamelCase = False def A__ ( self )-> list[TPosition]: '''simple docstring''' while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __UpperCamelCase = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(SCREAMING_SNAKE_CASE_ ) self.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.get_successors(SCREAMING_SNAKE_CASE_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: # retrieve the best current path __UpperCamelCase = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE_ ) return [self.start.pos] def A__ ( self , SCREAMING_SNAKE_CASE_ )-> list[Node]: '''simple docstring''' __UpperCamelCase = [] for action in delta: __UpperCamelCase = parent.pos_x + action[1] __UpperCamelCase = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE_ , ) ) return successors def A__ ( self , SCREAMING_SNAKE_CASE_ )-> list[TPosition]: '''simple docstring''' __UpperCamelCase = node __UpperCamelCase = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __UpperCamelCase = current_node.parent path.reverse() return path class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' __UpperCamelCase = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = AStar(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = False def A__ ( self )-> list[TPosition]: '''simple docstring''' while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() __UpperCamelCase = self.fwd_astar.open_nodes.pop(0 ) __UpperCamelCase = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.fwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) self.bwd_astar.closed_nodes.append(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = current_bwd_node __UpperCamelCase = current_fwd_node __UpperCamelCase = { self.fwd_astar: self.fwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ), self.bwd_astar: self.bwd_astar.get_successors(SCREAMING_SNAKE_CASE_ ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: # retrieve the best current path __UpperCamelCase = astar.open_nodes.pop( astar.open_nodes.index(SCREAMING_SNAKE_CASE_ ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) else: astar.open_nodes.append(SCREAMING_SNAKE_CASE_ ) return [self.fwd_astar.start.pos] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> list[TPosition]: '''simple docstring''' __UpperCamelCase = self.fwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.bwd_astar.retrace_path(SCREAMING_SNAKE_CASE_ ) bwd_path.pop() bwd_path.reverse() __UpperCamelCase = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] lowercase__ : Union[str, Any] = (0, 0) lowercase__ : Optional[Any] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowercase__ : Optional[int] = time.time() lowercase__ : List[str] = AStar(init, goal) lowercase__ : Optional[Any] = a_star.search() lowercase__ : Dict = time.time() - start_time print(F"AStar execution time = {end_time:f} seconds") lowercase__ : Dict = time.time() lowercase__ : Dict = BidirectionalAStar(init, goal) lowercase__ : Tuple = time.time() - bd_start_time print(F"BidirectionalAStar execution time = {bd_end_time:f} seconds")
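# Hedged sketch of the two heuristics selected by the module-level HEURISTIC
# flag above (1 -> Manhattan distance, otherwise Euclidean).
from math import sqrt

def manhattan(dx: int, dy: int) -> int:
    return abs(dx) + abs(dy)

def euclidean(dx: int, dy: int) -> float:
    return sqrt(dx**2 + dy**2)

print(manhattan(3, 4), euclidean(3, 4))  # 7 5.0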
from typing import Any class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self )-> str: '''simple docstring''' return F"Node({self.data})" class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = None def __iter__( self )-> Any: '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self )-> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self )-> str: '''simple docstring''' return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] ) def __getitem__( self , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) __UpperCamelCase = self.head for _ in range(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = current.next __UpperCamelCase = data def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(0 , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) __UpperCamelCase = Node(SCREAMING_SNAKE_CASE_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def A__ ( self )-> None: # print every node data '''simple docstring''' print(self ) def A__ ( self )-> Any: '''simple docstring''' return self.delete_nth(0 ) def A__ ( self )-> Any: # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def A__ ( self , SCREAMING_SNAKE_CASE_ = 0 )-> Any: '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def A__ ( self )-> bool: '''simple docstring''' return self.head is None def A__ ( self )-> None: '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. 
__UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(snake_case ) == i linked_list.insert_nth(snake_case , i + 1 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(snake_case ) == 9 assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(snake_case ) == "->".join(str(snake_case ) for i in range(-8 , 1 ) ) def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = [ -9, 100, Node(77345112 ), '''dlrow olleH''', 7, 5555, 0, -192.55555, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(snake_case ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def A_ ( ) -> Any: '''simple docstring''' from doctest import testmod testmod() __UpperCamelCase = LinkedList() 
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(snake_case ) print('''\nReading/changing Node data using indexing:''' ) print(f"Element at Position 1: {linked_list[1]}" ) __UpperCamelCase = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(snake_case ) print(f"length of linked_list is : {len(snake_case )}" ) if __name__ == "__main__": main()
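# A minimal, self-contained sketch of the three-pointer iterative reversal used by
# the reverse() method above; SimpleNode, reverse_chain and the demo values are
# illustrative assumptions, not identifiers from the original module.
class SimpleNode:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node


def reverse_chain(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # store the rest of the chain
        current.next = prev       # point the current node backwards
        prev = current            # advance prev
        current = next_node       # advance current
    return prev  # prev ends up being the new head


head = SimpleNode(1, SimpleNode(2, SimpleNode(3)))
node, out = reverse_chain(head), []
while node:
    out.append(node.data)
    node = node.next
assert out == [3, 2, 1]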
328
1
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core

# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def A_(pkg, hint=None):
    '''simple docstring'''
    require_version(deps[pkg], hint)
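# A rough, standalone sketch of the kind of runtime version check performed above,
# using the packaging library directly; the spec strings and versions below are
# made-up examples, not entries from the real dependency table.
from packaging.specifiers import SpecifierSet
from packaging.version import Version


def check_version(installed: str, spec: str) -> bool:
    # e.g. spec ">=4.0,<5.0" accepts 4.2.1 but rejects 5.1.0
    return Version(installed) in SpecifierSet(spec)


assert check_version("4.2.1", ">=4.0,<5.0")
assert not check_version("5.1.0", ">=4.0,<5.0")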
328
import math


def A_(num: int) -> bool:
    '''simple docstring'''
    # math.isqrt sidesteps the float rounding that makes sqrt(num) * sqrt(num) == num
    # unreliable for large integers
    return math.isqrt(num) ** 2 == num


def A_(n: int) -> bool:
    '''simple docstring'''
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
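# Quick sanity check of the perfect-square tests above on a handful of values,
# including a large one where naive float sqrt comparisons tend to break down.
import math

for value, expected in [(0, True), (1, True), (16, True), (26, False), (10**16, True)]:
    assert (math.isqrt(value) ** 2 == value) is expected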
328
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() lowercase__ : Optional[int] = logging.get_logger(__name__) lowercase__ : Optional[int] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } lowercase__ : List[Any] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def A_ ( snake_case : str ) -> str: '''simple docstring''' __UpperCamelCase = {} with open(snake_case , '''r''' ) as file: for line_number, line in enumerate(snake_case ): __UpperCamelCase = line.strip() if line: __UpperCamelCase = line.split() __UpperCamelCase = line_number __UpperCamelCase = words[0] __UpperCamelCase = value return result def A_ ( snake_case : int , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : str ) -> Union[str, Any]: '''simple docstring''' for attribute in key.split('''.''' ): __UpperCamelCase = getattr(snake_case , snake_case ) __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case ): __UpperCamelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]] __UpperCamelCase = '''param''' if weight_type is not None and weight_type != "param": __UpperCamelCase = getattr(snake_case , snake_case ).shape elif weight_type is not None and weight_type == "param": __UpperCamelCase = hf_pointer for attribute in hf_param_name.split('''.''' ): __UpperCamelCase = getattr(snake_case , snake_case ) __UpperCamelCase = shape_pointer.shape # let's reduce dimension __UpperCamelCase = value[0] else: __UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": __UpperCamelCase = value elif weight_type == "weight_g": __UpperCamelCase = value elif weight_type == "weight_v": __UpperCamelCase = value elif weight_type == "bias": __UpperCamelCase = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __UpperCamelCase = getattr(snake_case , snake_case ) __UpperCamelCase = value else: __UpperCamelCase = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def A_ ( snake_case : List[str] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str] ) -> Optional[int]: '''simple docstring''' __UpperCamelCase = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(snake_case ): __UpperCamelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]] __UpperCamelCase = '''param''' if weight_type is not None and weight_type != "param": __UpperCamelCase = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __UpperCamelCase = '''.'''.join([key, hf_param_name] ) else: __UpperCamelCase = key __UpperCamelCase = value if '''lm_head''' in full_key else value[0] lowercase__ : Dict = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def A_ ( snake_case : Tuple , snake_case : Any , snake_case : List[Any]=None , snake_case : Optional[int]=None ) -> List[Any]: '''simple docstring''' __UpperCamelCase = False for key, mapped_key in MAPPING.items(): __UpperCamelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __UpperCamelCase = True if "*" in mapped_key: __UpperCamelCase = name.split(snake_case )[0].split('''.''' )[-2] __UpperCamelCase = mapped_key.replace('''*''' , snake_case ) if "weight_g" in name: __UpperCamelCase = '''weight_g''' elif "weight_v" in name: __UpperCamelCase = '''weight_v''' elif "bias" in name: __UpperCamelCase = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCamelCase = '''weight''' else: __UpperCamelCase = None if hf_dict is not None: rename_dict(snake_case , snake_case , snake_case , snake_case , snake_case ) else: set_recursively(snake_case , snake_case , snake_case , snake_case , snake_case ) return is_used return is_used def A_ ( snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = [] __UpperCamelCase = fairseq_model.state_dict() __UpperCamelCase = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __UpperCamelCase = False if "conv_layers" in name: load_conv_layer( snake_case , snake_case , snake_case , snake_case , hf_model.config.feat_extract_norm == '''group''' , ) __UpperCamelCase = True else: __UpperCamelCase = load_wavaveca_layer(snake_case , snake_case , snake_case ) if not is_used: unused_weights.append(snake_case ) logger.warning(f"Unused weights: {unused_weights}" ) def A_ ( snake_case : Optional[int] , snake_case : int , snake_case : List[Any] , snake_case : Dict , snake_case : int ) -> str: '''simple docstring''' __UpperCamelCase = full_name.split('''conv_layers.''' )[-1] __UpperCamelCase = name.split('''.''' ) __UpperCamelCase = int(items[0] ) __UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) __UpperCamelCase = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(snake_case ) @torch.no_grad() def A_ ( snake_case : str , snake_case : int , snake_case : Dict=None , snake_case : List[Any]=None , snake_case : List[str]=True , snake_case : Optional[Any]=False ) -> Tuple: '''simple docstring''' if config_path is not None: __UpperCamelCase = WavaVecaConfig.from_pretrained(snake_case ) else: __UpperCamelCase = WavaVecaConfig() if is_seq_class: __UpperCamelCase = read_txt_into_dict(snake_case ) __UpperCamelCase = idalabel __UpperCamelCase = WavaVecaForSequenceClassification(snake_case ) __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=snake_case , return_attention_mask=snake_case , ) feature_extractor.save_pretrained(snake_case ) elif is_finetuned: if dict_path: __UpperCamelCase = Dictionary.load(snake_case ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __UpperCamelCase = target_dict.pad_index __UpperCamelCase = target_dict.bos_index __UpperCamelCase = target_dict.eos_index __UpperCamelCase = len(target_dict.symbols ) __UpperCamelCase = os.path.join(snake_case , '''vocab.json''' ) if not os.path.isdir(snake_case ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case ) ) return os.makedirs(snake_case , exist_ok=snake_case ) __UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched __UpperCamelCase = 0 __UpperCamelCase = 1 with open(snake_case , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(snake_case , snake_case ) __UpperCamelCase = WavaVecaCTCTokenizer( snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case , ) __UpperCamelCase = True if config.feat_extract_norm == '''layer''' else False __UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=snake_case , return_attention_mask=snake_case , ) __UpperCamelCase = WavaVecaProcessor(feature_extractor=snake_case , tokenizer=snake_case ) processor.save_pretrained(snake_case ) __UpperCamelCase = WavaVecaForCTC(snake_case ) else: __UpperCamelCase = WavaVecaForPreTraining(snake_case ) if is_finetuned 
or is_seq_class: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __UpperCamelCase = argparse.Namespace(task='''audio_pretraining''' ) __UpperCamelCase = fairseq.tasks.setup_task(snake_case ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case ) __UpperCamelCase = model[0].eval() recursively_load_weights(snake_case , snake_case , not is_finetuned ) hf_wavavec.save_pretrained(snake_case ) if __name__ == "__main__": lowercase__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) lowercase__ : List[Any] = parser.parse_args() lowercase__ : int = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
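# The conversion script above repeatedly resolves dotted key paths with chained
# getattr calls; a compact generic sketch of that traversal (Dummy and the
# attribute names are placeholders, not real model modules).
from functools import reduce


class Dummy:
    pass


root = Dummy()
root.encoder = Dummy()
root.encoder.layer_norm = "LN"


def resolve(obj, dotted_key):
    return reduce(getattr, dotted_key.split("."), obj)


assert resolve(root, "encoder.layer_norm") == "LN"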
328
def A_ ( ) -> list[list[int]]: '''simple docstring''' return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] lowercase__ : List[str] = generate_large_matrix() lowercase__ : Tuple = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def A_ ( snake_case : list[list[int]] ) -> None: '''simple docstring''' assert all(row == sorted(snake_case , reverse=snake_case ) for row in grid ) assert all(list(snake_case ) == sorted(snake_case , reverse=snake_case ) for col in zip(*snake_case ) ) def A_ ( snake_case : list[int] ) -> int: '''simple docstring''' __UpperCamelCase = 0 __UpperCamelCase = len(snake_case ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: __UpperCamelCase = (left + right) // 2 __UpperCamelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: __UpperCamelCase = mid + 1 else: __UpperCamelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(snake_case ) def A_ ( snake_case : list[list[int]] ) -> int: '''simple docstring''' __UpperCamelCase = 0 __UpperCamelCase = len(grid[0] ) for i in range(len(snake_case ) ): __UpperCamelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(snake_case ) * len(grid[0] )) - total def A_ ( snake_case : list[list[int]] ) -> int: '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def A_ ( snake_case : list[list[int]] ) -> int: '''simple docstring''' __UpperCamelCase = 0 for row in grid: for i, number in enumerate(snake_case ): if number < 0: total += len(snake_case ) - i break return total def A_ ( ) -> None: '''simple docstring''' from timeit import timeit print('''Running benchmarks''' ) __UpperCamelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): __UpperCamelCase = timeit(f"{func}(grid=grid)" , setup=snake_case , number=500 ) print(f"{func}() took {time:0.4f} seconds" ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
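# An alternative O(rows + cols) "staircase" count of negatives in a grid whose rows
# and columns are sorted in decreasing order, written from scratch here for
# comparison with the binary-search version above; the sample grid is one of the
# test grids used in the snippet.
def count_negatives_staircase(grid: list[list[int]]) -> int:
    count = 0
    col = len(grid[0]) - 1
    for row in grid:
        while col >= 0 and row[col] < 0:
            col -= 1
        # everything to the right of `col` in this row (and in all later rows,
        # since columns are sorted too) is negative
        count += len(row) - (col + 1)
    return count


assert count_negatives_staircase([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]) == 8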
328
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = ShapEImgaImgPipeline _snake_case = ['image'] _snake_case = ['image'] _snake_case = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] _snake_case = False @property def A__ ( self )-> Any: '''simple docstring''' return 32 @property def A__ ( self )-> Optional[Any]: '''simple docstring''' return 32 @property def A__ ( self )-> Any: '''simple docstring''' return self.time_input_dim * 4 @property def A__ ( self )-> Any: '''simple docstring''' return 8 @property def A__ ( self )-> Any: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __UpperCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE_ ) return model @property def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = CLIPImageProcessor( crop_size=224 , do_center_crop=SCREAMING_SNAKE_CASE_ , do_normalize=SCREAMING_SNAKE_CASE_ , do_resize=SCREAMING_SNAKE_CASE_ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor @property def A__ ( self )-> Dict: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''embedding_proj_norm_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __UpperCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE_ ) return model @property def A__ ( self )-> List[Any]: '''simple docstring''' torch.manual_seed(0 ) __UpperCamelCase = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __UpperCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE_ ) return model def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = self.dummy_prior __UpperCamelCase = self.dummy_image_encoder __UpperCamelCase = self.dummy_image_processor __UpperCamelCase = self.dummy_renderer __UpperCamelCase = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE_ , clip_sample=SCREAMING_SNAKE_CASE_ , clip_sample_range=1.0 , ) 
__UpperCamelCase = { '''prior''': prior, '''image_encoder''': image_encoder, '''image_processor''': image_processor, '''renderer''': renderer, '''scheduler''': scheduler, } return components def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 )-> Optional[int]: '''simple docstring''' __UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ ) if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ): __UpperCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = { '''image''': input_image, '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = '''cpu''' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = output.images[0] __UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCamelCase = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def A__ ( self )-> str: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = torch_device == '''cpu''' __UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE_ , relax_max_difference=SCREAMING_SNAKE_CASE_ , ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.get_dummy_components() __UpperCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = 1 __UpperCamelCase = 2 __UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) for key in inputs.keys(): if key in self.batch_params: __UpperCamelCase = batch_size * [inputs[key]] __UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' ) __UpperCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_img2img_out.npy''' ) __UpperCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' ) __UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 ) __UpperCamelCase = pipe( SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , 
guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
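# A tiny sketch of the seeded-generator pattern the tests above rely on for
# reproducibility: two generators seeded identically must yield identical tensors
# (assumes torch is installed; shapes and seed are arbitrary).
import torch

g1 = torch.Generator().manual_seed(0)
g2 = torch.Generator().manual_seed(0)
assert torch.equal(torch.randn(2, 3, generator=g1), torch.randn(2, 3, generator=g2))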
328
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ : str = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = size if size is not None else {'''shortest_edge''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = crop_pct __UpperCamelCase = resample __UpperCamelCase = do_center_crop __UpperCamelCase = crop_size __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) if crop_pct is not None: if "shortest_edge" in size: __UpperCamelCase = int(size['''shortest_edge'''] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __UpperCamelCase = int(size['''height'''] / crop_pct ) else: __UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct )) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) else: if "shortest_edge" in size: __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ ) elif "height" in size and "width" in size: __UpperCamelCase = (size['''height'''], size['''width''']) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str: '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image: '''simple docstring''' __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = size if size is not None 
else self.size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else self.crop_size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_pct is None: raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: __UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
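# The crop_pct logic above enlarges the resize target so that the subsequent center
# crop removes the border; a worked example of that arithmetic (values illustrative).
crop_size, crop_pct = 224, 0.9
resize_target = int(crop_size / crop_pct)  # resize the short edge to 248 ...
assert resize_target == 248                # ... then center-crop back down to 224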
328
1
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowercase__ : Any = getLogger(__name__) lowercase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def A_ ( snake_case : List[str] , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : str = DEFAULT_DEVICE , snake_case : List[str]=False , snake_case : Union[str, Any]="summarization" , snake_case : str=None , **snake_case : List[Any] , ) -> Dict: '''simple docstring''' __UpperCamelCase = Path(snake_case ).open('''w''' , encoding='''utf-8''' ) __UpperCamelCase = str(snake_case ) __UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).to(snake_case ) if fpaa: __UpperCamelCase = model.half() __UpperCamelCase = AutoTokenizer.from_pretrained(snake_case ) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __UpperCamelCase = time.time() # update config with task specific params use_task_specific_params(snake_case , snake_case ) if prefix is None: __UpperCamelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(snake_case , snake_case ) ) ): __UpperCamelCase = [prefix + text for text in examples_chunk] __UpperCamelCase = tokenizer(snake_case , return_tensors='''pt''' , truncation=snake_case , padding='''longest''' ).to(snake_case ) __UpperCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case , ) __UpperCamelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __UpperCamelCase = int(time.time() - start_time ) # seconds __UpperCamelCase = len(snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A_ ( ) -> Tuple: '''simple docstring''' return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def A_ ( snake_case : str=True ) -> int: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=snake_case , required=snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=snake_case , required=snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=snake_case , required=snake_case , default=snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=snake_case , required=snake_case , default=snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=snake_case , default=8 , required=snake_case , help='''batch size''' ) parser.add_argument( 
'''--n_obs''' , type=snake_case , default=-1 , required=snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __UpperCamelCase , __UpperCamelCase = parser.parse_known_args() __UpperCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case ) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}" ) __UpperCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __UpperCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __UpperCamelCase = generate_summaries_or_translations( snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case , ) if args.reference_path is None: return {} # Compute scores __UpperCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge __UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] __UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case )] __UpperCamelCase = score_fn(snake_case , snake_case ) scores.update(snake_case ) if args.dump_args: scores.update(snake_case ) if args.info: __UpperCamelCase = args.info if verbose: print(snake_case ) if args.score_path is not None: json.dump(snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
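# The generation loop above batches its inputs through a `chunks` utility imported
# from utils; a plausible minimal implementation of such a helper (a guess at its
# behaviour, not the actual imported code).
def chunks(lst, n):
    """Yield successive n-sized slices from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]


assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]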
328
1
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def A_(year: int, month: int, day: int) -> str:
    '''simple docstring'''
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # a year is NOT a leap year when it is not divisible by 4, or when it is a
    # century year that is not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
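# Cross-check of the doomsday computation above against the standard library for an
# arbitrary example date: October 24, 2020 was a Saturday.
import datetime

assert datetime.date(2020, 10, 24).strftime("%A") == "Saturday"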
328
from math import factorial


def solution(snake_case: int = 100) -> int:
    '''simple docstring'''
    return sum(int(x) for x in str(factorial(snake_case)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
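# Worked example of the computation above: 10! = 3628800, and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27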
328
1
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ : Dict = logging.get_logger(__name__) lowercase__ : Union[str, Any] = { "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json", "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json", "junnyu/roformer_chinese_char_small": ( "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json" ), "junnyu/roformer_chinese_char_base": ( "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json" ), "junnyu/roformer_small_discriminator": ( "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json" ), "junnyu/roformer_small_generator": ( "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json" ), # See all RoFormer models at https://huggingface.co/models?filter=roformer } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'roformer' def __init__( self , SCREAMING_SNAKE_CASE_=50000 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , )-> List[Any]: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size if embedding_size is None else embedding_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = rotary_value __UpperCamelCase = use_cache class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def A__ ( self )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCamelCase = {0: '''batch''', 1: '''sequence'''} __UpperCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
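# A short sketch of instantiating the config above through keyword overrides;
# assumes an installed transformers package and uses its public RoFormerConfig.
from transformers import RoFormerConfig

config = RoFormerConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2, num_attention_heads=2)
assert config.embedding_size == config.hidden_size  # embedding_size falls back to hidden_size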
328
def binary_insertion_sort(collection: list) -> list:
    '''simple docstring'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
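# A self-contained sketch of the same idea using the standard-library bisect helper
# (binary-search the insertion point, then insert) instead of the manual shifting
# loop above; the function name is an illustrative assumption.
from bisect import insort


def binary_insertion_sort_sketch(items: list) -> list:
    result = []
    for item in items:
        insort(result, item)  # binary search for the insertion point, then insert
    return result


assert binary_insertion_sort_sketch([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]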
328
1
def validate_initial_digits(credit_card_number: str) -> bool:
    '''simple docstring'''
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    '''simple docstring'''
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
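# A compact, self-contained restatement of the Luhn checksum above for cross-checking;
# the two test numbers are the same ones exercised in the snippet's __main__ block.
def luhn_ok(number: str) -> bool:
    digits = [int(d) for d in number][::-1]
    total = sum(digits[0::2])  # digits in odd positions from the right are kept as-is
    for d in digits[1::2]:     # every second digit from the right is doubled
        d *= 2
        total += d - 9 if d > 9 else d  # 12 -> 3, 15 -> 6, etc.
    return total % 10 == 0


assert luhn_ok("4111111111111111")
assert not luhn_ok("32323")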
328
from __future__ import annotations from collections import deque class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ )-> int: '''simple docstring''' __UpperCamelCase = [] self.adlist.append( {'''value''': '''''', '''next_states''': [], '''fail_state''': 0, '''output''': []} ) for keyword in keywords: self.add_keyword(SCREAMING_SNAKE_CASE_ ) self.set_fail_transitions() def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> int | None: '''simple docstring''' for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' __UpperCamelCase = 0 for character in keyword: __UpperCamelCase = self.find_next_state(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if next_state is None: self.adlist.append( { '''value''': character, '''next_states''': [], '''fail_state''': 0, '''output''': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) __UpperCamelCase = len(self.adlist ) - 1 else: __UpperCamelCase = next_state self.adlist[current_state]["output"].append(SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> None: '''simple docstring''' __UpperCamelCase = deque() for node in self.adlist[0]["next_states"]: q.append(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = 0 while q: __UpperCamelCase = q.popleft() for child in self.adlist[r]["next_states"]: q.append(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.adlist[r]['''fail_state'''] while ( self.find_next_state(SCREAMING_SNAKE_CASE_ , self.adlist[child]['''value'''] ) is None and state != 0 ): __UpperCamelCase = self.adlist[state]['''fail_state'''] __UpperCamelCase = self.find_next_state( SCREAMING_SNAKE_CASE_ , self.adlist[child]['''value'''] ) if self.adlist[child]["fail_state"] is None: __UpperCamelCase = 0 __UpperCamelCase = ( self.adlist[child]['''output'''] + self.adlist[self.adlist[child]['''fail_state''']]['''output'''] ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> dict[str, list[int]]: '''simple docstring''' __UpperCamelCase = {} # returns a dict with keywords and list of its occurrences __UpperCamelCase = 0 for i in range(len(SCREAMING_SNAKE_CASE_ ) ): while ( self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] ) is None and current_state != 0 ): __UpperCamelCase = self.adlist[current_state]['''fail_state'''] __UpperCamelCase = self.find_next_state(SCREAMING_SNAKE_CASE_ , string[i] ) if next_state is None: __UpperCamelCase = 0 else: __UpperCamelCase = next_state for key in self.adlist[current_state]["output"]: if key not in result: __UpperCamelCase = [] result[key].append(i - len(SCREAMING_SNAKE_CASE_ ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
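# A brute-force cross-check for the Aho-Corasick matcher above: try every pattern at
# every offset and collect start indices, which is exactly what the automaton
# computes in a single pass (example strings are arbitrary).
def naive_multisearch(text: str, patterns: list[str]) -> dict[str, list[int]]:
    return {
        p: [i for i in range(len(text) - len(p) + 1) if text[i : i + len(p)] == p]
        for p in patterns
    }


assert naive_multisearch("whatever, err ... , wherever", ["what", "hat", "ver", "er"]) == {
    "what": [0],
    "hat": [1],
    "ver": [5, 25],
    "er": [6, 10, 22, 26],
}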
328
1
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowercase__ : str = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) requires_backends(self , '''vision''' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def A__ ( self , SCREAMING_SNAKE_CASE_=None )-> List[Any]: '''simple docstring''' __UpperCamelCase = {} if top_k is not None: __UpperCamelCase = top_k return {}, {}, postprocess_params def __call__( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> List[Any]: '''simple docstring''' return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' __UpperCamelCase = load_image(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors=self.framework ) return model_inputs def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[Any]: '''simple docstring''' __UpperCamelCase = self.model(**SCREAMING_SNAKE_CASE_ ) return model_outputs def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=5 )-> int: '''simple docstring''' if top_k > self.model.config.num_labels: __UpperCamelCase = self.model.config.num_labels if self.framework == "pt": __UpperCamelCase = model_outputs.logits.softmax(-1 )[0] __UpperCamelCase , __UpperCamelCase = probs.topk(SCREAMING_SNAKE_CASE_ ) elif self.framework == "tf": __UpperCamelCase = stable_softmax(model_outputs.logits , axis=-1 )[0] __UpperCamelCase = tf.math.top_k(SCREAMING_SNAKE_CASE_ , k=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(F"Unsupported framework: {self.framework}" ) __UpperCamelCase = scores.tolist() __UpperCamelCase = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
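# Minimal sketch of exercising the pipeline above via the high-level factory; assumes
# transformers plus a vision backend are installed, and the image URL is illustrative.
from transformers import pipeline

classifier = pipeline("image-classification")
preds = classifier("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", top_k=3)
print(preds)  # a list of {"score": float, "label": str} dicts, best first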
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> Dict: '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self )-> str: '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' __UpperCamelCase = 
DistilBertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' __UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , 
(__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _snake_case = ( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _snake_case = True _snake_case = True _snake_case = True _snake_case = True def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = DistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 ) def A__ ( self )-> Dict: '''simple docstring''' self.config_tester.run_common_tests() def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) @slow def A__ ( self )-> List[str]: '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __UpperCamelCase = True __UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) ) __UpperCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] __UpperCamelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.tensor( [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
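# These tests are normally collected by pytest in the usual transformers repository
# layout (a sketch; the file path is the conventional one, and RUN_SLOW=1 enables
# the @slow integration tests):
#   RUN_SLOW=1 python -m pytest tests/models/distilbert/test_modeling_distilbert.py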
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo lowercase__ : Optional[int] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" lowercase__ : Union[str, Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" lowercase__ : List[str] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def A__ ( self )-> MetricInfo: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 4 , )-> Dict[str, float]: '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE_ , hypotheses=SCREAMING_SNAKE_CASE_ , min_len=SCREAMING_SNAKE_CASE_ , max_len=SCREAMING_SNAKE_CASE_ ) }
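# The metric above delegates to NLTK's corpus_gleu; a minimal sketch of the same
# computation without the datasets wrapper (the token lists are illustrative):
from nltk.translate.gleu_score import corpus_gleu

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
references = [[["the", "cat", "is", "on", "the", "mat"]]]
print(corpus_gleu(list_of_references=references, hypotheses=hypotheses, min_len=1, max_len=4))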
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowercase__ : Optional[Any] = logging.getLogger(__name__) def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int: '''simple docstring''' def get_dataset(snake_case : Optional[int] ): __UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __UpperCamelCase = get_dataset(snake_case ) __UpperCamelCase = get_dataset(snake_case ) __UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) __UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any: '''simple docstring''' __UpperCamelCase = [] for epoch in range(snake_case ): # Train quickly model.train() for batch in dataloader: __UpperCamelCase , __UpperCamelCase = batch __UpperCamelCase = model(snake_case ) __UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case ) accelerator.backward(snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self )-> Tuple: '''simple docstring''' super().__init__() __UpperCamelCase = nn.Parameter(torch.randn(1 ) ) __UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' return x * self.a + self.b class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A__ ( self )-> Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() # Train baseline __UpperCamelCase = Accelerator() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) # Save initial __UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() __UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = Accelerator() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.load_state(SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save everything __UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) # Load everything back in and make sure all states work accelerator.load_state(SCREAMING_SNAKE_CASE_ ) test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() __UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) 
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = torch.tensor([1, 2, 3] ) __UpperCamelCase = torch.tensor([2, 3, 4] ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(net.parameters() ) __UpperCamelCase = Accelerator() with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve: accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() __UpperCamelCase = scheduler.state_dict() train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , 
scheduler.state_dict() ) def A__ ( self )-> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing" lowercase__ : List[Any] = DummyModel() lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3) lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowercase__ , lowercase__ : str = dummy_dataloaders() lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: lowercase__ : int = group["params"][0].device break assert param_device.type == accelerator.device.type lowercase__ : Union[str, Any] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: lowercase__ : Any = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: lowercase__ : List[Any] = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
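# A minimal sketch of the save/load flow these tests exercise, using the public
# accelerate API (the checkpoint directory name is illustrative):
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state("ckpt_dir")  # writes model, optimizer and RNG state
accelerator.load_state("ckpt_dir")  # restores them, which is what the assertions check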
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a greyscale PIL image around its (integer) mean pixel value."""
    width, height = image.size
    mean = 0
    pixels = image.load()
    # First pass: accumulate the mean over all pixels (pixels is indexed [x, y]).
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    # Second pass: set each pixel to 255 if above the mean, else 0.
    for j in range(width):
        for i in range(height):
            pixels[j, i] = 255 if pixels[j, i] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
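# An equivalent vectorised sketch with NumPy (not part of the original file; note
# that arr.mean() is a float, so borderline pixels can round differently from the
# integer mean used above):
import numpy as np


def mean_threshold_np(image: Image.Image) -> Image.Image:
    arr = np.asarray(image)
    binary = (arr > arr.mean()).astype(np.uint8) * 255
    return Image.fromarray(binary)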
import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , **SCREAMING_SNAKE_CASE_ , )-> Optional[int]: '''simple docstring''' super().__init__(features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = Sql( cache_dir=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , sql=SCREAMING_SNAKE_CASE_ , con=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None self.builder.download_and_prepare( download_config=SCREAMING_SNAKE_CASE_ , download_mode=SCREAMING_SNAKE_CASE_ , verification_mode=SCREAMING_SNAKE_CASE_ , base_path=SCREAMING_SNAKE_CASE_ , ) # Build dataset for splits __UpperCamelCase = self.builder.as_dataset( split='''train''' , verification_mode=SCREAMING_SNAKE_CASE_ , in_memory=self.keep_in_memory ) return dataset class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> List[str]: '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(F"num_proc {num_proc} must be an integer > 0." 
) __UpperCamelCase = dataset __UpperCamelCase = name __UpperCamelCase = con __UpperCamelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __UpperCamelCase = num_proc __UpperCamelCase = to_sql_kwargs def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.to_sql_kwargs.pop('''sql''' , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.to_sql_kwargs.pop('''con''' , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self.to_sql_kwargs.pop('''index''' , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self._write(index=SCREAMING_SNAKE_CASE_ , **self.to_sql_kwargs ) return written def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = args __UpperCamelCase = {**to_sql_kwargs, '''if_exists''': '''append'''} if offset > 0 else to_sql_kwargs __UpperCamelCase = query_table( table=self.dataset.data , key=slice(SCREAMING_SNAKE_CASE_ , offset + self.batch_size ) , indices=self.dataset._indices , ) __UpperCamelCase = batch.to_pandas() __UpperCamelCase = df.to_sql(self.name , self.con , index=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) return num_rows or len(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> int: '''simple docstring''' __UpperCamelCase = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: __UpperCamelCase , __UpperCamelCase = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating SQL from Arrow format''' , ): written += num_rows return written
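# The writer above backs Dataset.to_sql; a minimal usage sketch with an in-memory
# SQLite connection (the table name and rows are illustrative):
import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
con = sqlite3.connect(":memory:")
ds.to_sql("my_table", con, batch_size=1000)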
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
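# Invocation sketch for the converter above (the script name and all paths are
# placeholders):
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_checkpoint.ckpt \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output_dir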
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
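# Quick sanity checks for the conversion above ("A" -> 1, "Z" -> 26, "AA" -> 27, "AB" -> 28):
assert excel_title_to_column("A") == 1
assert excel_title_to_column("Z") == 26
assert excel_title_to_column("AA") == 27
assert excel_title_to_column("AB") == 28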
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE_=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="relu" , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=None , )-> int: '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = image_size __UpperCamelCase = num_channels __UpperCamelCase = embeddings_size __UpperCamelCase = hidden_sizes __UpperCamelCase = depths __UpperCamelCase = is_training __UpperCamelCase = use_labels __UpperCamelCase = hidden_act __UpperCamelCase = num_labels __UpperCamelCase = scope __UpperCamelCase = len(SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.num_labels ) __UpperCamelCase = self.get_config() return config, pixel_values, labels def A__ ( self )-> str: '''simple docstring''' return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = TFResNetModel(config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = TFResNetForImageClassification(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = config_and_inputs __UpperCamelCase = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = (TFResNetModel, 
TFResNetForImageClassification) if is_tf_available() else () _snake_case = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) _snake_case = False _snake_case = False _snake_case = False _snake_case = False _snake_case = False def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = TFResNetModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Optional[Any]: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self )-> Any: '''simple docstring''' return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def A__ ( self )-> int: '''simple docstring''' pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def A__ ( self )-> Optional[int]: '''simple docstring''' pass def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase = [*signature.parameters.keys()] __UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Any: '''simple docstring''' def check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = model_class(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCamelCase = self.model_tester.num_stages self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: __UpperCamelCase = layer_type __UpperCamelCase = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCamelCase = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ ) @slow def A__ ( self )-> 
Tuple: '''simple docstring''' for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = TFResNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def A_ ( ) -> Optional[int]: '''simple docstring''' __UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self )-> List[Any]: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __UpperCamelCase = self.default_image_processor __UpperCamelCase = prepare_img() __UpperCamelCase = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''tf''' ) # forward pass __UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits __UpperCamelCase = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = tf.constant([-1_1.1_0_6_9, -9.7_8_7_7, -8.3_7_7_7] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
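# A compact inference sketch matching the integration test above ("microsoft/resnet-50"
# is the first entry of TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST; the image path is the
# test fixture used above):
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000), ImageNet classes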
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Build the triangle row by row from the previous row."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Exploit each row's symmetry: compute only the first half and mirror it."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError("The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Time both generators over a range of inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
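# Quick consistency check: both generators agree on the first five rows.
assert generate_pascal_triangle(5) == generate_pascal_triangle_optimized(5) == [
    [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]
]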
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"

_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"

_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"

_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"

_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
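# --- Editor's sanity check on the unbiased pass@k estimator above, not part of
# the original metric. With n=5 samples and c=2 correct, pass@1 reduces to
# c/n = 1 - (3/4)(4/5) = 0.4. ---
import numpy as np  # repeated here so the snippet runs standalone

print(estimate_pass_at_k(np.array([5]), np.array([2]), k=1))  # -> [0.4]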
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
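# --- Editor's usage sketch, assuming a transformers checkout where this config
# is importable as ConvNextV2Config; not part of the original file. ---
config = ConvNextV2Config()
print(config.hidden_sizes)  # [96, 192, 384, 768]
print(config.depths)        # [3, 3, 9, 3]
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']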
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
import string


def decrypt(message: str) -> None:
    """Brute-force every possible Caesar-cipher key and print each candidate plaintext."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
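# --- Editor's usage sketch, not part of the original module. "KHOOR" is
# "HELLO" shifted forward by 3; brute-forcing prints all 26 shifts and the
# line for key #3 recovers the plaintext. ---
decrypt("KHOOR")
# ...
# Decryption using Key #3: HELLO
# ...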
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n))."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
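# --- Editor's note, not part of the original solution. This is Goldbach's
# "other" conjecture: every odd composite should be a prime plus twice a
# square (e.g. 9 = 7 + 2*1^2). The first counterexamples are well known:
# compute_nums(2)  -> [5777, 5993]
# solution()       -> 5777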
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Treats the curve as a collection of linear segments and sums the trapezium areas."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
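# --- Editor's sanity check, not part of the original module. The exact area
# between f(x) = x^3 + x^2 and the x-axis on [-5, 5] is 938/3 ~= 312.67
# (split the integral at the sign change x = -1), so the estimate should
# converge on that value as `steps` grows. ---
exact_area = 938 / 3
approx_area = trapezoidal_area(lambda x: x**3 + x**2, -5, 5, steps=100_000)
assert abs(approx_area - exact_area) < 1e-2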
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")

    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[str] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """Creates a beta schedule that discretizes the given alpha_bar function."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

import evaluate
from accelerate import Accelerator, DistributedType
from datasets import load_dataset


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
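# --- Editor's usage sketch, not part of the original module. Each completion
# carries a trailing space because END maps to " " in _elements: ---
# autocomplete_using_trie("de")  -> ('depart ', 'detergent ', 'deer ', 'deal ')
# autocomplete_using_trie("dog") -> ('dog ',)
print(autocomplete_using_trie("dog"))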
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging


if TYPE_CHECKING:
    from ...feature_extraction_utils import FeatureExtractionMixin
    from ...tokenization_utils_base import PreTrainedTokenizerBase
    from ...utils import TensorType

logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93,
    357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391,
    1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203,
    9609, 9959, 10563, 10786, 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410,
    16791, 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, 34949, 40283,
    40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93,
    359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246,
    3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 10428, 10929,
    11938, 12033, 12331, 12562, 13793, 14157, 14635, 15265, 15618, 16553, 16604, 18362,
    18956, 20075, 21675, 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470,
    36865, 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on


class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )


class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor: Union["FeatureExtractionMixin", "PreTrainedTokenizerBase"],
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
328
1
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging lowercase__ : str = "\\n\n" lowercase__ : Union[str, Any] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" lowercase__ : List[Any] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def A__ ( self )-> Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_=None )-> Any: '''simple docstring''' if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": __UpperCamelCase = '''cuda''' else: __UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' __UpperCamelCase = AutoModelForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model.to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: __UpperCamelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(SCREAMING_SNAKE_CASE_ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" __UpperCamelCase = model.config.max_length - 1 else: __UpperCamelCase = model.config.max_length __UpperCamelCase = tokenizer( SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' , return_attention_mask=SCREAMING_SNAKE_CASE_ , ).to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = encodings['''input_ids'''] __UpperCamelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." __UpperCamelCase = [] __UpperCamelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ): __UpperCamelCase = min(start_index + batch_size , len(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = encoded_texts[start_index:end_index] __UpperCamelCase = attn_masks[start_index:end_index] if add_start_token: __UpperCamelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) __UpperCamelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(SCREAMING_SNAKE_CASE_ ), attn_mask] , dim=1 ) __UpperCamelCase = encoded_batch with torch.no_grad(): __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).logits __UpperCamelCase = out_logits[..., :-1, :].contiguous() __UpperCamelCase = labels[..., 1:].contiguous() __UpperCamelCase = attn_mask[..., 1:].contiguous() __UpperCamelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , SCREAMING_SNAKE_CASE_ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(SCREAMING_SNAKE_CASE_ )}
328
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : Any = logging.get_logger(__name__) lowercase__ : Tuple = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'xlnet' _snake_case = ['mems'] _snake_case = { 'n_token': 'vocab_size', # Backward compatibility 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="bi" , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="tanh" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , )-> List[str]: '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = d_model __UpperCamelCase = n_layer __UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" ) __UpperCamelCase = d_model // n_head __UpperCamelCase = ff_activation __UpperCamelCase = d_inner __UpperCamelCase = untie_r __UpperCamelCase = attn_type __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = dropout __UpperCamelCase = mem_len __UpperCamelCase = reuse_len __UpperCamelCase = bi_data __UpperCamelCase = clamp_len __UpperCamelCase = same_length __UpperCamelCase = summary_type __UpperCamelCase = summary_use_proj __UpperCamelCase = summary_activation __UpperCamelCase = summary_last_dropout __UpperCamelCase = start_n_top __UpperCamelCase = end_n_top __UpperCamelCase = bos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = kwargs['''use_cache'''] __UpperCamelCase = use_mems_eval __UpperCamelCase = use_mems_train super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def A__ ( self )-> Optional[Any]: '''simple docstring''' logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." ) return -1 @max_position_embeddings.setter def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' raise NotImplementedError( F"The model {self.model_type} is one of the few models that has no sequence length limit." )
328
1
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale the data to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale the data to zero mean and unit sample standard deviation (z-scores)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
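# A quick sanity check for the two scalers above, using small hand-picked data:
#
#     >>> normalization([2, 4, 6])
#     [0.0, 0.5, 1.0]
#     >>> standardization([2, 4, 6])   # sample stdev of [2, 4, 6] is 2.0
#     [-1.0, 0.0, 1.0]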
328
from typing import Any class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self )-> str: '''simple docstring''' return F"Node({self.data})" class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = None def __iter__( self )-> Any: '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self )-> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self )-> str: '''simple docstring''' return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] ) def __getitem__( self , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) __UpperCamelCase = self.head for _ in range(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = current.next __UpperCamelCase = data def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(0 , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) __UpperCamelCase = Node(SCREAMING_SNAKE_CASE_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def A__ ( self )-> None: # print every node data '''simple docstring''' print(self ) def A__ ( self )-> Any: '''simple docstring''' return self.delete_nth(0 ) def A__ ( self )-> Any: # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def A__ ( self , SCREAMING_SNAKE_CASE_ = 0 )-> Any: '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def A__ ( self )-> bool: '''simple docstring''' return self.head is None def A__ ( self )-> None: '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. 
__UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(snake_case ) == i linked_list.insert_nth(snake_case , i + 1 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(snake_case ) == 9 assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(snake_case ) == "->".join(str(snake_case ) for i in range(-8 , 1 ) ) def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = [ -9, 100, Node(77345112 ), '''dlrow olleH''', 7, 5555, 0, -192.55555, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(snake_case ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def A_ ( ) -> Any: '''simple docstring''' from doctest import testmod testmod() __UpperCamelCase = LinkedList() 
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(snake_case ) print('''\nReading/changing Node data using indexing:''' ) print(f"Element at Position 1: {linked_list[1]}" ) __UpperCamelCase = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(snake_case ) print(f"length of linked_list is : {len(snake_case )}" ) if __name__ == "__main__": main()
328
1
from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve matrix * x = vector by Gaussian elimination with partial
    pivoting, returning the solution as a column vector."""
    size = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting: pick the row with the largest absolute value in this column
        pivot_row = max(
            (abs(augmented[row2][col]), row2) for row2 in range(row, size)
        )[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_points: list[int]) -> Callable[[int], int]:
    """Return the polynomial of minimal degree fitted through the points
    (1, y_points[0]), (2, y_points[1]), ..."""
    size = len(y_points)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]

    for x_val, y_val in enumerate(y_points):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The degree-10 generating polynomial from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms (FITs) of the optimum polynomials fitted
    to increasing prefixes of the sequence."""
    data_points = [func(x_val) for x_val in range(1, order + 1)]
    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret = 0
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
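# A short check of `interpolate`, mirroring the cube-sequence example from the
# Project Euler 101 problem statement: fitting a quadratic through
# (1, 1), (2, 8), (3, 27) and evaluating at n = 4 yields the first incorrect
# term, 58.
#
#     >>> interpolate([1, 8, 27])(4)
#     58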
328
import math


def perfect_square(num: int) -> bool:
    """Check if a number is a perfect square using math.isqrt.

    (Comparing math.sqrt(num) * math.sqrt(num) == num is fragile for large
    inputs because of floating-point error; the integer square root avoids it.)
    """
    if num < 0:
        return False
    return math.isqrt(num) ** 2 == num


def perfect_square_binary_search(n: int) -> bool:
    """Check if a number is a perfect square using binary search."""
    if n < 0:
        return False
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
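# Usage sketch for the two checks above:
#
#     >>> perfect_square(625), perfect_square(626)
#     (True, False)
#     >>> perfect_square_binary_search(169)
#     True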
328
1
from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['flax', 'transformers'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Optional[Any]: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['flax', 'transformers'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['flax', 'transformers'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) class SCREAMING_SNAKE_CASE__ ( metaclass=SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['flax', 'transformers'] def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' requires_backends(self , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] ) @classmethod def A__ ( cls , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' requires_backends(cls , ['''flax''', '''transformers'''] )
328
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count the negatives, shrinking the per-row search bound as rows only
    grow more negative going down the grid."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
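# Usage sketch on the first test grid (the classic LeetCode 1351 example,
# which contains 8 negative numbers):
#
#     >>> count_negatives_binary_search([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]])
#     8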
328
1
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS: str = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS: list[int] = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS: set[int] = {ord(char) for char in VALID_CHARS}
COMMON_WORDS: list[str] = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    """XOR the ciphertext with a cycled key; return the plaintext, or None if
    any decoded character falls outside the printable range."""
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    """Try every three-letter lowercase key and keep the decodings whose
    characters are all printable."""
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
328
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ : str = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = size if size is not None else {'''shortest_edge''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = crop_pct __UpperCamelCase = resample __UpperCamelCase = do_center_crop __UpperCamelCase = crop_size __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) if crop_pct is not None: if "shortest_edge" in size: __UpperCamelCase = int(size['''shortest_edge'''] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __UpperCamelCase = int(size['''height'''] / crop_pct ) else: __UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct )) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) else: if "shortest_edge" in size: __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ ) elif "height" in size and "width" in size: __UpperCamelCase = (size['''height'''], size['''width''']) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str: '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image: '''simple docstring''' __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = size if size is not None 
else self.size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else self.crop_size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_pct is None: raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: __UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
328
1
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
328
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowercase__ : Any = getLogger(__name__) lowercase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def A_ ( snake_case : List[str] , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : str = DEFAULT_DEVICE , snake_case : List[str]=False , snake_case : Union[str, Any]="summarization" , snake_case : str=None , **snake_case : List[Any] , ) -> Dict: '''simple docstring''' __UpperCamelCase = Path(snake_case ).open('''w''' , encoding='''utf-8''' ) __UpperCamelCase = str(snake_case ) __UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).to(snake_case ) if fpaa: __UpperCamelCase = model.half() __UpperCamelCase = AutoTokenizer.from_pretrained(snake_case ) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __UpperCamelCase = time.time() # update config with task specific params use_task_specific_params(snake_case , snake_case ) if prefix is None: __UpperCamelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(snake_case , snake_case ) ) ): __UpperCamelCase = [prefix + text for text in examples_chunk] __UpperCamelCase = tokenizer(snake_case , return_tensors='''pt''' , truncation=snake_case , padding='''longest''' ).to(snake_case ) __UpperCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case , ) __UpperCamelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __UpperCamelCase = int(time.time() - start_time ) # seconds __UpperCamelCase = len(snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A_ ( ) -> Tuple: '''simple docstring''' return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def A_ ( snake_case : str=True ) -> int: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=snake_case , required=snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=snake_case , required=snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=snake_case , required=snake_case , default=snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=snake_case , required=snake_case , default=snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=snake_case , default=8 , required=snake_case , help='''batch size''' ) parser.add_argument( 
'''--n_obs''' , type=snake_case , default=-1 , required=snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __UpperCamelCase , __UpperCamelCase = parser.parse_known_args() __UpperCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case ) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}" ) __UpperCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __UpperCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __UpperCamelCase = generate_summaries_or_translations( snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case , ) if args.reference_path is None: return {} # Compute scores __UpperCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge __UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] __UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case )] __UpperCamelCase = score_fn(snake_case , snake_case ) scores.update(snake_case ) if args.dump_args: scores.update(snake_case ) if args.info: __UpperCamelCase = args.info if verbose: print(snake_case ) if args.score_path is not None: json.dump(snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
328
1
import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowercase__ : Any = "src/diffusers" # Matches is_xxx_available() lowercase__ : Union[str, Any] = re.compile(R"is\_([a-z_]*)_available\(\)") # Matches from xxx import bla lowercase__ : Dict = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") lowercase__ : str = "\n{0} = None\n" lowercase__ : str = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n" lowercase__ : int = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" def A_ ( snake_case : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCamelCase = _re_backend.findall(snake_case ) if len(snake_case ) == 0: return None return "_and_".join(snake_case ) def A_ ( ) -> Optional[Any]: '''simple docstring''' with open(os.path.join(snake_case , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCamelCase = f.readlines() # Get to the point we do the actual imports for type checking __UpperCamelCase = 0 __UpperCamelCase = {} # Go through the end of the file while line_index < len(snake_case ): # If the line contains is_backend_available, we grab all objects associated with the `else` block __UpperCamelCase = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 __UpperCamelCase = [] # Until we unindent, add backend objects to the list while line_index < len(snake_case ) and len(lines[line_index] ) > 1: __UpperCamelCase = lines[line_index] __UpperCamelCase = _re_single_line_import.search(snake_case ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(snake_case ) > 0: __UpperCamelCase = objects else: line_index += 1 return backend_specific_objects def A_ ( snake_case : List[Any] , snake_case : Dict ) -> Dict: '''simple docstring''' if name.isupper(): return DUMMY_CONSTANT.format(snake_case ) elif name.islower(): return DUMMY_FUNCTION.format(snake_case , snake_case ) else: return DUMMY_CLASS.format(snake_case , snake_case ) def A_ ( snake_case : str=None ) -> List[str]: '''simple docstring''' if backend_specific_objects is None: __UpperCamelCase = read_init() # For special correspondence backend to module name as used in the function requires_modulename __UpperCamelCase = {} for backend, objects in backend_specific_objects.items(): __UpperCamelCase = '''[''' + ''', '''.join(f"\"{b}\"" for b in backend.split('''_and_''' ) ) + ''']''' __UpperCamelCase = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(snake_case , snake_case ) for o in objects] ) __UpperCamelCase = dummy_file return dummy_files def A_ ( snake_case : str=False ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py __UpperCamelCase = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. 
__UpperCamelCase = os.path.join(snake_case , '''utils''' ) __UpperCamelCase = { backend: os.path.join(snake_case , f"dummy_{short_names.get(snake_case , snake_case )}_objects.py" ) for backend in dummy_files.keys() } __UpperCamelCase = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(snake_case ): with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __UpperCamelCase = f.read() else: __UpperCamelCase = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"Updating diffusers.utils.dummy_{short_names.get(snake_case , snake_case )}_objects.py as the main " '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' f"diffusers.utils.dummy_{short_names.get(snake_case , snake_case )}_objects.py. Run `make fix-copies` " '''to fix this.''' ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") lowercase__ : Union[str, Any] = parser.parse_args() check_dummies(args.fix_and_overwrite)
328
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
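# Worked example: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
#
#     >>> solution(10)
#     27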
328
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

_import_structure = {
    "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
    "tokenization_tapas": ["TapasTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tapas"] = [
        "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TapasForMaskedLM",
        "TapasForQuestionAnswering",
        "TapasForSequenceClassification",
        "TapasModel",
        "TapasPreTrainedModel",
        "load_tf_weights_in_tapas",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_tapas"] = [
        "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFTapasForMaskedLM",
        "TFTapasForQuestionAnswering",
        "TFTapasForSequenceClassification",
        "TFTapasModel",
        "TFTapasPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
    from .tokenization_tapas import TapasTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tapas import (
            TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TapasForMaskedLM,
            TapasForQuestionAnswering,
            TapasForSequenceClassification,
            TapasModel,
            TapasPreTrainedModel,
            load_tf_weights_in_tapas,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_tapas import (
            TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFTapasForMaskedLM,
            TFTapasForQuestionAnswering,
            TFTapasForSequenceClassification,
            TFTapasModel,
            TFTapasPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
328
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place using insertion sort, locating each element's
    insertion point with a binary search."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion index of val in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to make room, then insert val.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
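# Usage sketch:
#
#     >>> binary_insertion_sort([5, 2, 9, 1, 5])
#     [1, 2, 5, 5, 9]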
328
1
def is_balanced(s: str) -> bool:
    """Check whether the brackets in the string are balanced, using a stack."""
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
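# Usage sketch; non-bracket characters are ignored by the stack check:
#
#     >>> is_balanced("([]{})")
#     True
#     >>> is_balanced("([)]")
#     False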
328
from __future__ import annotations

from collections import deque


class Automaton:
    """Aho-Corasick automaton for multi-keyword string matching."""

    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        """Compute fail links breadth-first and merge outputs along them."""
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """Return a dict mapping each keyword to the list of positions at
        which it occurs in the string."""
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
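# Usage sketch (positions are the starting indices of each match):
#
#     >>> A = Automaton(["what", "hat", "ver", "er"])
#     >>> A.search_in("whatever, err ... , wherever")
#     {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}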
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : Optional[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} lowercase__ : Dict = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } lowercase__ : List[Any] = { "allenai/longformer-base-4096": 4_0_9_6, "allenai/longformer-large-4096": 4_0_9_6, "allenai/longformer-large-4096-finetuned-triviaqa": 4_0_9_6, "allenai/longformer-base-4096-extra.pos.embd.only": 4_0_9_6, "allenai/longformer-large-4096-extra.pos.embd.only": 4_0_9_6, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def A_ ( ) -> Dict: '''simple docstring''' __UpperCamelCase = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) __UpperCamelCase = bs[:] __UpperCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case ) cs.append(2**8 + n ) n += 1 __UpperCamelCase = [chr(snake_case ) for n in cs] return dict(zip(snake_case , snake_case ) ) def A_ ( snake_case : List[str] ) -> Tuple: '''simple docstring''' __UpperCamelCase = set() __UpperCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCamelCase = char return pairs class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , )-> List[str]: '''simple docstring''' __UpperCamelCase = 
AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else bos_token __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else eos_token __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else sep_token __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else cls_token __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else unk_token __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle: __UpperCamelCase = json.load(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = {v: k for k, v in self.encoder.items()} __UpperCamelCase = errors # how to handle errors in decoding __UpperCamelCase = bytes_to_unicode() __UpperCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle: __UpperCamelCase = merges_handle.read().split('''\n''' )[1:-1] __UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges] __UpperCamelCase = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) ) __UpperCamelCase = {} __UpperCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCamelCase = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property def A__ ( self )-> Optional[Any]: '''simple docstring''' return len(self.encoder ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' if token in self.cache: return self.cache[token] __UpperCamelCase = tuple(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = get_pairs(SCREAMING_SNAKE_CASE_ ) if not pairs: return token while True: __UpperCamelCase = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __UpperCamelCase , __UpperCamelCase = bigram __UpperCamelCase = [] __UpperCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE_ ): try: __UpperCamelCase = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) 
except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCamelCase = j if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCamelCase = tuple(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = new_word if len(SCREAMING_SNAKE_CASE_ ) == 1: break else: __UpperCamelCase = get_pairs(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = word return word def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = [] for token in re.findall(self.pat , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) return bpe_tokens def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' return self.decoder.get(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = ''''''.join(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> Tuple[str]: '''simple docstring''' if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __UpperCamelCase = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCamelCase = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' ) __UpperCamelCase = 0 with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." 
''' Please check that the tokenizer is not corrupted!''' ) __UpperCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] __UpperCamelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False )-> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> List[int]: '''simple docstring''' __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(SCREAMING_SNAKE_CASE_ ) > 0 and not text[0].isspace()): __UpperCamelCase = ''' ''' + text return (text, kwargs)
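# Typical use of this tokenizer through the public transformers API; the repo id
# is one of the checkpoints listed in the vocab map above, and the sample text
# is invented here:
# from transformers import LongformerTokenizer
# tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
# enc = tok("Hello world", return_tensors="pt")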
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , )-> Dict: '''simple docstring''' __UpperCamelCase = parent __UpperCamelCase = batch_size __UpperCamelCase = seq_length __UpperCamelCase = is_training __UpperCamelCase = use_input_mask __UpperCamelCase = use_token_type_ids __UpperCamelCase = use_labels __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_act __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = type_sequence_label_size __UpperCamelCase = initializer_range __UpperCamelCase = num_labels __UpperCamelCase = num_choices __UpperCamelCase = scope def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase = None if self.use_input_mask: __UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = None if self.use_labels: __UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __UpperCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self )-> str: '''simple docstring''' return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' __UpperCamelCase = 
DistilBertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Tuple: '''simple docstring''' __UpperCamelCase = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.num_labels __UpperCamelCase = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.num_choices __UpperCamelCase = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCamelCase = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.prepare_config_and_inputs() ((__UpperCamelCase) , 
(__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase) , (__UpperCamelCase)) = config_and_inputs __UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): """simple docstring""" _snake_case = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) _snake_case = ( { 'feature-extraction': DistilBertModel, 'fill-mask': DistilBertForMaskedLM, 'question-answering': DistilBertForQuestionAnswering, 'text-classification': DistilBertForSequenceClassification, 'token-classification': DistilBertForTokenClassification, 'zero-shot': DistilBertForSequenceClassification, } if is_torch_available() else {} ) _snake_case = True _snake_case = True _snake_case = True _snake_case = True def A__ ( self )-> Dict: '''simple docstring''' __UpperCamelCase = DistilBertModelTester(self ) __UpperCamelCase = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 ) def A__ ( self )-> Dict: '''simple docstring''' self.config_tester.run_common_tests() def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) @slow def A__ ( self )-> List[str]: '''simple docstring''' for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @slow @require_torch_gpu def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __UpperCamelCase = True __UpperCamelCase = model_class(config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.jit.trace( SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) ) __UpperCamelCase = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ ) loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __UpperCamelCase = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCamelCase = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] __UpperCamelCase = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = torch.tensor( [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
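# These tests are normally collected by pytest from the transformers repo; the
# path below is the usual location and is shown here as an assumption:
#   pytest tests/models/distilbert/test_modeling_distilbert.py -k "integration"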
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms not exceeding ``n``
    (Project Euler problem 2)."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
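# Worked examples (even Fibonacci terms <= n, checked by hand):
# >>> solution(10)   # 2 + 8
# 10
# >>> solution(34)   # 2 + 8 + 34
# 44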
import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed lowercase__ : Optional[Any] = logging.getLogger(__name__) def A_ ( snake_case : Any=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : int = 10 , snake_case : int = 2 ) -> int: '''simple docstring''' def get_dataset(snake_case : Optional[int] ): __UpperCamelCase = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(snake_case , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) __UpperCamelCase = get_dataset(snake_case ) __UpperCamelCase = get_dataset(snake_case ) __UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) __UpperCamelCase = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) return (train_dataloader, valid_dataloader) def A_ ( snake_case : List[str] , snake_case : int , snake_case : List[str] , snake_case : Optional[int] , snake_case : int , snake_case : str=None ) -> Any: '''simple docstring''' __UpperCamelCase = [] for epoch in range(snake_case ): # Train quickly model.train() for batch in dataloader: __UpperCamelCase , __UpperCamelCase = batch __UpperCamelCase = model(snake_case ) __UpperCamelCase = torch.nn.functional.mse_loss(snake_case , snake_case ) accelerator.backward(snake_case ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class SCREAMING_SNAKE_CASE__ ( nn.Module ): """simple docstring""" def __init__( self )-> Tuple: '''simple docstring''' super().__init__() __UpperCamelCase = nn.Parameter(torch.randn(1 ) ) __UpperCamelCase = nn.Parameter(torch.randn(1 ) ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' return x * self.a + self.b class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def A__ ( self )-> Tuple: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(total_limit=1 , project_dir=SCREAMING_SNAKE_CASE_ , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def A__ ( self )-> Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() # Train baseline __UpperCamelCase = Accelerator() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ ) # Save initial __UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''initial''' ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() __UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = Accelerator() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.load_state(SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save everything __UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoint''' ) accelerator.save_state(SCREAMING_SNAKE_CASE_ ) # Load everything back in and make sure all states work accelerator.load_state(SCREAMING_SNAKE_CASE_ ) test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() __UpperCamelCase = train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() # Train partially set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) 
__UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = train(2 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ((__UpperCamelCase) , (__UpperCamelCase)) = model.a.item(), model.b.item() __UpperCamelCase = optimizer.state_dict() self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> Tuple: '''simple docstring''' __UpperCamelCase = torch.tensor([1, 2, 3] ) __UpperCamelCase = torch.tensor([2, 3, 4] ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(net.parameters() ) __UpperCamelCase = Accelerator() with self.assertRaises(SCREAMING_SNAKE_CASE_ ) as ve: accelerator.register_for_checkpointing(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def A__ ( self )-> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) __UpperCamelCase = torch.optim.lr_scheduler.StepLR(SCREAMING_SNAKE_CASE_ , step_size=1 , gamma=0.9_9 ) __UpperCamelCase , __UpperCamelCase = dummy_dataloaders() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save initial accelerator.save_state() __UpperCamelCase = scheduler.state_dict() train(3 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) self.assertNotEqual(SCREAMING_SNAKE_CASE_ , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(SCREAMING_SNAKE_CASE_ , 
scheduler.state_dict() ) def A__ ( self )-> List[str]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdir: set_seed(42 ) __UpperCamelCase = DummyModel() __UpperCamelCase = ProjectConfiguration(automatic_checkpoint_naming=SCREAMING_SNAKE_CASE_ , total_limit=2 ) # Train baseline __UpperCamelCase = Accelerator(project_dir=SCREAMING_SNAKE_CASE_ , project_config=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ ) # Save 3 states: for _ in range(11 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE_ , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = ['''torchrun''', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )] execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=os.environ.copy() ) if __name__ == "__main__": lowercase__ : Optional[int] = "/tmp/accelerate/state_checkpointing" lowercase__ : List[Any] = DummyModel() lowercase__ : Tuple = torch.optim.Adam(params=model.parameters(), lr=1e-3) lowercase__ : int = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) lowercase__ , lowercase__ : str = dummy_dataloaders() lowercase__ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline lowercase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no") if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) lowercase__ , lowercase__ : str = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: lowercase__ : int = group["params"][0].device break assert param_device.type == accelerator.device.type lowercase__ : Union[str, Any] = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu") for group in optimizer.param_groups: lowercase__ : Any = group["params"][0].device break assert ( param_device.type == torch.device("cpu").type ), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device") for group in optimizer.param_groups: lowercase__ : List[Any] = group["params"][0].device break assert ( param_device.type == accelerator.device.type ), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match="Unsupported optimizer map location passed"): accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid") accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
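# The __main__ block above is what the @require_cuda test launches through
# torchrun; a direct single-process invocation would look like this (the file
# name is a placeholder, since this dump does not record it):
#   torchrun --nproc_per_node=1 test_state_checkpointing.py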
import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowercase__ : Any = logging.getLogger(__name__) lowercase__ : Union[str, Any] = 5_0 # max width of layer names lowercase__ : Dict = 7_0 # max width of quantizer names def A_ ( snake_case : List[Any] ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=snake_case , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=snake_case , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=snake_case , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=snake_case , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=snake_case , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=snake_case , type=snake_case , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=snake_case , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def A_ ( snake_case : int ) -> Any: '''simple docstring''' if args.calibrator == "max": __UpperCamelCase = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) __UpperCamelCase = '''histogram''' elif args.calibrator == "mse": __UpperCamelCase = '''histogram''' else: raise ValueError(f"Invalid calibrator {args.calibrator}" ) __UpperCamelCase = QuantDescriptor(num_bits=args.aprec , calib_method=snake_case ) __UpperCamelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(snake_case ) quant_nn.QuantLinear.set_default_quant_desc_weight(snake_case ) def A_ ( snake_case : Dict , snake_case : str , snake_case : List[str]=False , snake_case : Any=False ) -> Optional[int]: '''simple docstring''' logger.info('''Configuring Model for Quantization''' ) logger.info(f"using quantization package {pytorch_quantization.__file__}" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(snake_case , ['''embeddings'''] , which='''weight''' , _disabled=snake_case ) if args.quant_disable: set_quantizer_by_name(snake_case , [''''''] , _disabled=snake_case ) if args.quant_disable_keyword: set_quantizer_by_name(snake_case , args.quant_disable_keyword , _disabled=snake_case ) if 
args.quant_disable_layer_module: set_quantizer_by_name(snake_case , [r'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=snake_case ) if args.quant_enable_layer_module: set_quantizer_by_name(snake_case , [r'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=snake_case ) if args.recalibrate_weights: recalibrate_weights(snake_case ) if args.fuse_qkv: fuse_qkv(snake_case , snake_case ) if args.clip_gelu: clip_gelu(snake_case , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(snake_case ) def A_ ( snake_case : str ) -> List[str]: '''simple docstring''' logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f"{name:80}: {module}" ) def A_ ( snake_case : Optional[int] , snake_case : int ) -> List[str]: '''simple docstring''' logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(snake_case ) def A_ ( snake_case : Union[str, Any] , snake_case : List[Any] ) -> Any: '''simple docstring''' def fusea(snake_case : Dict , snake_case : Optional[int] , snake_case : Tuple ): for mod in [qq, qk, qv]: if not hasattr(snake_case , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return __UpperCamelCase = qq._amax.detach().item() __UpperCamelCase = qk._amax.detach().item() __UpperCamelCase = qv._amax.detach().item() __UpperCamelCase = max(snake_case , snake_case , snake_case ) qq._amax.fill_(snake_case ) qk._amax.fill_(snake_case ) qv._amax.fill_(snake_case ) logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f"FUSE_QKV: {name:{name_width}}" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def A_ ( snake_case : Any , snake_case : int ) -> Optional[Any]: '''simple docstring''' for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): __UpperCamelCase = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=snake_case ) __UpperCamelCase = mod._input_quantizer._amax.data.detach().item() logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" ) def A_ ( snake_case : int ) -> Any: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(snake_case , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: __UpperCamelCase = mod.weight.shape[0] __UpperCamelCase = mod._weight_quantizer._amax.detach() __UpperCamelCase = torch.ones(snake_case , dtype=amax.dtype , device=amax.device ) * amax print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}" ) def A_ ( snake_case : Union[str, Any] ) -> Any: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(snake_case , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): 
print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) __UpperCamelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) __UpperCamelCase = set(range(len(mod.weight.size() ) ) ) - axis_set __UpperCamelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=snake_case , keepdims=snake_case ).detach() logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" ) __UpperCamelCase = amax def A_ ( snake_case : str , snake_case : int=25 , snake_case : Optional[int]=180 , snake_case : int=None ) -> List[Any]: '''simple docstring''' if ignore is None: __UpperCamelCase = [] elif not isinstance(snake_case , snake_case ): __UpperCamelCase = [ignore] __UpperCamelCase = 0 for name, mod in model.named_modules(): if not hasattr(snake_case , '''weight''' ): continue __UpperCamelCase = max(snake_case , len(snake_case ) ) for name, mod in model.named_modules(): __UpperCamelCase = getattr(snake_case , '''_input_quantizer''' , snake_case ) __UpperCamelCase = getattr(snake_case , '''_weight_quantizer''' , snake_case ) if not hasattr(snake_case , '''weight''' ): continue if type(snake_case ) in ignore: continue if [True for s in ignore if type(snake_case ) is str and s in name]: continue __UpperCamelCase = f"Act:{input_q.extra_repr()}" __UpperCamelCase = f"Wgt:{weight_q.extra_repr()}" __UpperCamelCase = f"{name:{name_width}} {act_str} {wgt_str}" if len(snake_case ) <= line_width: logger.info(snake_case ) else: logger.info(f"{name:{name_width}} {act_str}" ) logger.info(f"{' ':{name_width}} {wgt_str}" ) def A_ ( snake_case : Dict ) -> List[Any]: '''simple docstring''' __UpperCamelCase = 0 for name, mod in model.named_modules(): if isinstance(snake_case , pytorch_quantization.nn.TensorQuantizer ): print(f"{name:80} {mod}" ) count += 1 print(f"{count} TensorQuantizers found in model" ) def A_ ( snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : str ) -> Dict: '''simple docstring''' __UpperCamelCase = getattr(snake_case , snake_case , snake_case ) if quantizer_mod is not None: assert hasattr(snake_case , snake_case ) setattr(snake_case , snake_case , snake_case ) else: logger.warning(f"{name} has no {quantizer}" ) def A_ ( snake_case : Dict , snake_case : List[Any] , snake_case : Optional[int]="both" , **snake_case : int ) -> int: '''simple docstring''' __UpperCamelCase = f"Warning: changing {which} quantizers of {name:{qname_width}}" for k, v in kwargs.items(): s += f" {k}={v}" if which in ["input", "both"]: set_quantizer(snake_case , snake_case , '''_input_quantizer''' , snake_case , snake_case ) if which in ["weight", "both"]: set_quantizer(snake_case , snake_case , '''_weight_quantizer''' , snake_case , snake_case ) logger.info(snake_case ) def A_ ( snake_case : Dict , snake_case : Union[str, Any] , **snake_case : Tuple ) -> Tuple: '''simple docstring''' for name, mod in model.named_modules(): if hasattr(snake_case , '''_input_quantizer''' ) or hasattr(snake_case , '''_weight_quantizer''' ): for n in names: if re.search(snake_case , snake_case ): set_quantizers(snake_case , snake_case , **snake_case ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(snake_case , snake_case ): __UpperCamelCase = f"Warning: changing {name:{name_width}}" for k, v in kwargs.items(): s += f" {k}={v}" setattr(snake_case , snake_case , 
snake_case ) logger.info(snake_case )
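# Sketch of how this module is typically wired into a training script. The
# entry-point names (add_arguments, set_default_quantizers, configure_model)
# are assumptions about the de-obfuscated names of the functions defined above:
# import argparse
# parser = argparse.ArgumentParser()
# add_arguments(parser)             # registers the --quant-* / --calibrator flags
# args = parser.parse_args()
# set_default_quantizers(args)      # pick calibrators and bit widths
# configure_model(model, args)      # then train or calibrate as usual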
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written
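# Illustrative round trip through the public datasets API, which routes through
# the reader and writer above (file path and table name are made up):
# from datasets import Dataset
# ds = Dataset.from_dict({"text": ["a", "b"]})
# ds.to_sql("my_table", "sqlite:///data.db")                # SqlDatasetWriter
# ds2 = Dataset.from_sql("my_table", "sqlite:///data.db")   # SqlDatasetReader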
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler problem 65: sum of the digits in the numerator of the
    ``max_n``-th convergent of the continued fraction for e."""
    pre_numerator = 1
    cur_numerator = 2

    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp

    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
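# Worked example: the 10th convergent of e has numerator 1457, whose digits
# sum to 1 + 4 + 5 + 7 = 17.
# >>> solution(10)
# 17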
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
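# Worked examples ("AB" -> 1*26 + 2 = 28, "ZY" -> 26*26 + 25 = 701):
# >>> excel_title_to_column("A")
# 1
# >>> excel_title_to_column("AB")
# 28
# >>> excel_title_to_column("ZY")
# 701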
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    """A pangram uses every letter of the alphabet at least once."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
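# Quick checks (second sentence invented here):
# >>> is_pangram()                      # default sentence covers a-z
# True
# >>> is_pangram("My name is Unknown")  # missing most letters
# False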
def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle row by row from the two elements above each cell."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    """Create Pascal's triangle using each row's symmetry, so only the first
    half of every row is computed explicitly."""
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    """Benchmark both triangle generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
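# Small worked example (both generators agree):
# >>> generate_pascal_triangle(3)
# [[1], [1, 1], [1, 2, 1]]
# >>> generate_pascal_triangle_optimized(3)
# [[1], [1, 1], [1, 2, 1]]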
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration class for an MGP-STR scene-text-recognition model."""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
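# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): in user code this class is
# normally imported from the top-level `transformers` package. Attribute names
# follow the reconstruction above; the defaults are meant to mirror the
# "alibaba-damo/mgp-str-base" checkpoint.
# from transformers import MgpstrConfig
#
# config = MgpstrConfig()
# print(config.model_type, config.max_token_length, config.hidden_size)  # mgp-str 27 768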
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
from collections.abc import Generator
from math import sin


def to_little_endian(string_32: bytes) -> bytes:
    """Convert a 32-bit string of bits to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Format a non-negative integer as the hex digest of its little-endian bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Pad the message to a bit string whose length is a multiple of 512."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Split the bit string into 512-bit blocks of sixteen 32-bit little-endian words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotate the 32 least-significant bits of ``i`` left by ``shift`` positions."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Return the MD5 digest of ``message`` as a bytestring of hex characters."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest


if __name__ == "__main__":
    import doctest

    doctest.testmod()
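# ---------------------------------------------------------------------------
# Verification sketch (not part of the original file): the pure-Python digest
# should match the standard library for any input. `md5_me` is the name used
# in the reconstruction above.
import hashlib

for msg in (b"", b"The quick brown fox jumps over the lazy dog"):
    assert md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8")
print("md5_me matches hashlib.md5")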
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"

_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"

_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric(\"code_eval\")\n    >>> test_cases = [\"assert add(2,3)==5\"]\n    >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {'pass@1': 0.5, 'pass@2': 1.0}\n"

_WARNING = "\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"

_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-candidate results."""
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
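# ---------------------------------------------------------------------------
# Numeric sketch (not part of the original file) of the unbiased estimator
# used above: pass@k = 1 - C(n - c, k) / C(n, k) for n samples with c passing.
import numpy as np

# n=5 candidates, c=2 passed: pass@1 = 1 - C(3,1)/C(5,1) = 0.4
print(estimate_pass_at_k(np.array([5]), np.array([2]), 1))  # [0.4]
# k=4 > n-c=3, so every draw of 4 candidates contains a passing one: pass@4 = 1.0
print(estimate_pass_at_k(np.array([5]), np.array([2]), 4))  # [1.]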
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under ``fnc`` between ``x_start`` and ``x_end``
    using the trapezoidal rule with ``steps`` segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
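# ---------------------------------------------------------------------------
# Accuracy sketch (not part of the original file): for f(x) = x^3 + x^2 the
# area between the curve and the x-axis on [-5, 5] has a closed form from the
# antiderivative F(x) = x**4/4 + x**3/3, split at the sign change x = -1:
# area = (F(-1) - F(-5) in magnitude) + (F(5) - F(-1)) = 1376/12 + 2376/12.
def f(x: float) -> float:
    return x**3 + x**2


exact = 1376 / 12 + 2376 / 12  # = 312.666...
approx = trapezoidal_area(f, -5, 5, 100_000)
print(f"exact={exact:.6f} approx={approx:.6f} error={abs(exact - approx):.2e}")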
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

import pandas as pd
import pyarrow as pa

import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal


logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
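# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): this packaged module is what
# runs under the hood when the "csv" builder is requested; CsvConfig fields
# such as `sep` or `skiprows` can be overridden as keyword arguments. The file
# path below is a placeholder.
from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=",")
print(ds["train"])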
def get_column_number(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "A", "AB", "ZY") to its
    1-based column number, treating the title as a base-26 numeral."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
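# ---------------------------------------------------------------------------
# Spot checks (not part of the original file) for the base-26 conversion;
# `get_column_number` is the name used in the reconstruction above.
for title, expected in (("A", 1), ("Z", 26), ("AA", 27), ("AZ", 52), ("ZZ", 702)):
    assert get_column_number(title) == expected
    print(f"{title} -> {expected}")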
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check primality in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composite numbers that cannot be written as
    the sum of a prime and twice a square (Goldbach's other conjecture)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    """Return the smallest counterexample to the conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
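# ---------------------------------------------------------------------------
# Sanity check (not part of the original file): the smallest odd composite
# that is not a prime plus twice a square is the well-known 5777
# (Project Euler problem 46).
assert solution() == 5777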
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR: outputs 1 when both inputs are equal, else 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class UMT5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map an audiocraft parameter name onto the HF Musicgen module layout."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict to HF names and split it into the decoder
    (LM) state dict and the encoder-decoder projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")

    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
import logging
import os

import quant_trainer
import torch
from torch.utils.data import DataLoader

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput


logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("Trainer: calibration requires an calib_dataset.")
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description="Calibration")

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=True,
        )

    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info("***** Running calibration *****")
        logger.info(f"  Num examples = {self.calib_num}")
        logger.info(f"  Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=True)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)

        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info("Converting model to be onnx compatible")
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, "module") else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, "model.onnx")
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: "batch_size", 1: "seq_len"}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=["input_ids", "attention_mask", "token_type_ids"],
            output_names=["output_start_logits", "output_end_logits"],
            dynamic_axes={
                "input_ids": axes,
                "attention_mask": axes,
                "token_type_ids": axes,
                "output_start_logits": axes,
                "output_end_logits": axes,
            },
            verbose=True,
        )
        logger.info("onnx export finished")
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase__ : List[str] = 1_6 lowercase__ : str = 3_2 def A_ ( snake_case : Accelerator , snake_case : int = 16 ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) __UpperCamelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=snake_case , max_length=snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCamelCase = datasets.map( snake_case , batched=snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case : str ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCamelCase = 16 elif accelerator.mixed_precision != "no": __UpperCamelCase = 8 else: __UpperCamelCase = None return tokenizer.pad( snake_case , padding='''longest''' , max_length=snake_case , pad_to_multiple_of=snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
__UpperCamelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) __UpperCamelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=snake_case , collate_fn=snake_case , batch_size=snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowercase__ : Union[str, Any] = mocked_dataloaders # noqa: F811 def A_ ( snake_case : List[str] , snake_case : List[Any] ) -> Tuple: '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , snake_case ) == "1": __UpperCamelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: __UpperCamelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: __UpperCamelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase = config['''lr'''] __UpperCamelCase = int(config['''num_epochs'''] ) __UpperCamelCase = int(config['''seed'''] ) __UpperCamelCase = int(config['''batch_size'''] ) set_seed(snake_case ) __UpperCamelCase , __UpperCamelCase = get_dataloaders(snake_case , snake_case ) __UpperCamelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation __UpperCamelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCamelCase = batch_size // MAX_GPU_BATCH_SIZE __UpperCamelCase = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer __UpperCamelCase = AdamW(params=model.parameters() , lr=snake_case ) # Instantiate scheduler __UpperCamelCase = get_linear_schedule_with_warmup( optimizer=snake_case , num_warmup_steps=100 , num_training_steps=(len(snake_case ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare( snake_case , snake_case , snake_case , snake_case , snake_case ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: __UpperCamelCase = os.path.split(snake_case )[-1].split('''.''' )[0] accelerator.init_trackers(snake_case , snake_case ) # Now we train the model for epoch in range(snake_case ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: __UpperCamelCase = 0 for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() __UpperCamelCase = loss / gradient_accumulation_steps accelerator.backward(snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.logits.argmax(dim=-1 ) __UpperCamelCase , __UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=snake_case , references=snake_case , ) __UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:" , snake_case ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(snake_case ), '''epoch''': epoch, } , step=snake_case , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def A_ ( ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=snake_case , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(snake_case , snake_case ) if __name__ == "__main__": main()
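# The tracking additions above reduce to a small API surface. A minimal sketch
# of just the tracker calls, assuming at least one tracker (e.g. TensorBoard)
# is installed in the environment; project name and values are illustrative.
from accelerate import Accelerator

accelerator = Accelerator(log_with="all", project_dir="logs")
accelerator.init_trackers("mrpc_example", config={"lr": 2e-5, "batch_size": 16})
for step in range(3):
    accelerator.log({"train_loss": 1.0 / (step + 1)}, step=step)
accelerator.end_training()  # closes all open trackers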
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowercase__ : int = "Create a default config file for Accelerate with only a few flags set." def A_ ( snake_case : int="no" , snake_case : str = default_json_config_file , snake_case : bool = False ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase = Path(snake_case ) path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) if path.exists(): print( f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`." ) return False __UpperCamelCase = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}" ) __UpperCamelCase = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): __UpperCamelCase = torch.cuda.device_count() __UpperCamelCase = num_gpus __UpperCamelCase = False if num_gpus > 1: __UpperCamelCase = '''MULTI_GPU''' else: __UpperCamelCase = '''NO''' elif is_xpu_available() and use_xpu: __UpperCamelCase = torch.xpu.device_count() __UpperCamelCase = num_xpus __UpperCamelCase = False if num_xpus > 1: __UpperCamelCase = '''MULTI_XPU''' else: __UpperCamelCase = '''NO''' elif is_npu_available(): __UpperCamelCase = torch.npu.device_count() __UpperCamelCase = num_npus __UpperCamelCase = False if num_npus > 1: __UpperCamelCase = '''MULTI_NPU''' else: __UpperCamelCase = '''NO''' else: __UpperCamelCase = 0 __UpperCamelCase = True __UpperCamelCase = 1 __UpperCamelCase = '''NO''' __UpperCamelCase = ClusterConfig(**snake_case ) config.to_json_file(snake_case ) return path def A_ ( snake_case : Dict , snake_case : Optional[Any] ) -> Optional[int]: '''simple docstring''' __UpperCamelCase = parser.add_parser('''default''' , parents=snake_case , help=snake_case , formatter_class=snake_case ) parser.add_argument( '''--config_file''' , default=snake_case , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=snake_case , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. 
''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=snake_case ) return parser def A_ ( snake_case : Optional[int] ) -> int: '''simple docstring''' __UpperCamelCase = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(f"accelerate configuration saved at {config_file}" )
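# The same default config can be produced without the CLI; write_basic_config
# is re-exported from accelerate.utils. A sketch, assuming the default save
# location is writable and no config file exists there yet.
from accelerate.utils import write_basic_config

config_path = write_basic_config(mixed_precision="fp16")
print(config_path)  # False if a configuration already existed at the location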
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType lowercase__ : str = logging.get_logger(__name__) lowercase__ : Union[str, Any] = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off lowercase__ : str = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5, 7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7, 1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1, 4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6, 1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1, 1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9, 3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1 ] lowercase__ : str = [ 1, 2, 7, 8, 9, 1_0, 1_4, 2_5, 2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2, 6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3, 8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7, 3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7, 7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3, 1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5, 2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5, 4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2 ] class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'whisper' _snake_case = ['past_key_values'] _snake_case = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , SCREAMING_SNAKE_CASE_=51865 , SCREAMING_SNAKE_CASE_=80 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=1536 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=50257 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1500 , SCREAMING_SNAKE_CASE_=448 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=50256 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=[220, 50256] , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=256 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=0.0_5 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=7 , **SCREAMING_SNAKE_CASE_ , )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = num_mel_bins 
__UpperCamelCase = d_model __UpperCamelCase = encoder_layers __UpperCamelCase = encoder_attention_heads __UpperCamelCase = decoder_layers __UpperCamelCase = decoder_attention_heads __UpperCamelCase = decoder_ffn_dim __UpperCamelCase = encoder_ffn_dim __UpperCamelCase = dropout __UpperCamelCase = attention_dropout __UpperCamelCase = activation_dropout __UpperCamelCase = activation_function __UpperCamelCase = init_std __UpperCamelCase = encoder_layerdrop __UpperCamelCase = decoder_layerdrop __UpperCamelCase = use_cache __UpperCamelCase = encoder_layers __UpperCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __UpperCamelCase = max_source_positions __UpperCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __UpperCamelCase = classifier_proj_size __UpperCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __UpperCamelCase = apply_spec_augment __UpperCamelCase = mask_time_prob __UpperCamelCase = mask_time_length __UpperCamelCase = mask_time_min_masks __UpperCamelCase = mask_feature_prob __UpperCamelCase = mask_feature_length __UpperCamelCase = mask_feature_min_masks __UpperCamelCase = median_filter_width super().__init__( pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=SCREAMING_SNAKE_CASE_ , suppress_tokens=SCREAMING_SNAKE_CASE_ , begin_suppress_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def A__ ( self )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' __UpperCamelCase = OrderedDict( [ ('''input_features''', {0: '''batch''', 1: '''feature_size''', 2: '''encoder_sequence'''}), ] ) if self.use_past: __UpperCamelCase = {0: '''batch'''} else: __UpperCamelCase = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(SCREAMING_SNAKE_CASE_ , direction='''inputs''' ) return common_inputs def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = -1 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 22050 , SCREAMING_SNAKE_CASE_ = 5.0 , SCREAMING_SNAKE_CASE_ = 220 , )-> Mapping[str, Any]: '''simple docstring''' __UpperCamelCase = OrderedDict() __UpperCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=SCREAMING_SNAKE_CASE_ , framework=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , time_duration=SCREAMING_SNAKE_CASE_ , frequency=SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = encoder_inputs['''input_features'''].shape[2] __UpperCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __UpperCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = encoder_inputs.pop('''input_features''' ) __UpperCamelCase = decoder_inputs.pop('''decoder_input_ids''' ) if "past_key_values" in decoder_inputs: __UpperCamelCase = decoder_inputs.pop('''past_key_values''' ) return dummy_inputs @property def A__ ( self )-> float: '''simple docstring''' return 1E-3
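# For reference, the config above can be instantiated directly; this sketch
# uses deliberately small sizes so it runs quickly and is not a real checkpoint.
from transformers import WhisperConfig, WhisperModel

config = WhisperConfig(
    d_model=256,
    encoder_layers=2,
    decoder_layers=2,
    encoder_attention_heads=4,
    decoder_attention_heads=4,
)
model = WhisperModel(config)
print(model.config.num_mel_bins)  # 80, the default set above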
import copy import tempfile import unittest from huggingface_hub import HfFolder, delete_repo from parameterized import parameterized from requests.exceptions import HTTPError from transformers import AutoConfig, GenerationConfig from transformers.testing_utils import TOKEN, USER, is_staging_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @parameterized.expand([(None,), ('''foo.json''',)] ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Dict: '''simple docstring''' __UpperCamelCase = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , config_name=SCREAMING_SNAKE_CASE_ ) # Checks parameters that were specified self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(loaded_config.temperature , 0.7 ) self.assertEqual(loaded_config.length_penalty , 1.0 ) self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] ) # Checks parameters that were not specified (defaults) self.assertEqual(loaded_config.top_k , 50 ) self.assertEqual(loaded_config.max_length , 20 ) self.assertEqual(loaded_config.max_time , SCREAMING_SNAKE_CASE_ ) def A__ ( self )-> List[str]: '''simple docstring''' __UpperCamelCase = AutoConfig.from_pretrained('''gpt2''' ) __UpperCamelCase = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = GenerationConfig() # The generation config has loaded a few non-default parameters from the model config self.assertNotEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # One of those parameters is eos_token_id -- check if it matches self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id ) self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id ) def A__ ( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = GenerationConfig() __UpperCamelCase = { '''max_new_tokens''': 1024, '''foo''': '''bar''', } __UpperCamelCase = copy.deepcopy(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = generation_config.update(**SCREAMING_SNAKE_CASE_ ) # update_kwargs was not modified (no side effects) self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(generation_config.max_new_tokens , 1024 ) # `.update()` returns a dictionary of unused kwargs self.assertEqual(SCREAMING_SNAKE_CASE_ , {'''foo''': '''bar'''} ) def A__ ( self )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = GenerationConfig() __UpperCamelCase = '''bar''' with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir: generation_config.save_pretrained(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) # update_kwargs was used to update the config on valid attributes self.assertEqual(new_config.foo , '''bar''' ) __UpperCamelCase = GenerationConfig.from_model_config(SCREAMING_SNAKE_CASE_ ) assert not hasattr(SCREAMING_SNAKE_CASE_ , '''foo''' ) # no new kwargs should be initialized if from config def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = GenerationConfig() self.assertEqual(default_config.temperature , 1.0 ) self.assertEqual(default_config.do_sample , SCREAMING_SNAKE_CASE_ ) 
self.assertEqual(default_config.num_beams , 1 ) __UpperCamelCase = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , ) self.assertEqual(config.temperature , 0.7 ) self.assertEqual(config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(config.num_beams , 1 ) with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = GenerationConfig.from_pretrained(SCREAMING_SNAKE_CASE_ , temperature=1.0 ) self.assertEqual(loaded_config.temperature , 1.0 ) self.assertEqual(loaded_config.do_sample , SCREAMING_SNAKE_CASE_ ) self.assertEqual(loaded_config.num_beams , 1 ) # default value @is_staging_test class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @classmethod def A__ ( cls )-> Union[str, Any]: '''simple docstring''' __UpperCamelCase = TOKEN HfFolder.save_token(SCREAMING_SNAKE_CASE_ ) @classmethod def A__ ( cls )-> List[str]: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-generation-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' ) except HTTPError: pass def A__ ( self )-> str: '''simple docstring''' __UpperCamelCase = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''test-generation-config''' , use_auth_token=self._token ) __UpperCamelCase = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-generation-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='''test-generation-config''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) __UpperCamelCase = GenerationConfig.from_pretrained(F"{USER}/test-generation-config" ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) def A__ ( self )-> int: '''simple docstring''' __UpperCamelCase = GenerationConfig( do_sample=SCREAMING_SNAKE_CASE_ , temperature=0.7 , length_penalty=1.0 , ) config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token ) __UpperCamelCase = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( SCREAMING_SNAKE_CASE_ , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE_ , use_auth_token=self._token ) __UpperCamelCase = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(SCREAMING_SNAKE_CASE_ , getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
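# Outside the test harness, the round-trip and update() semantics exercised
# above look like this; a sketch using a throwaway temporary directory.
import tempfile
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
unused = config.update(max_new_tokens=1024, foo="bar")
assert unused == {"foo": "bar"}  # unknown kwargs are returned, not applied

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)
    loaded = GenerationConfig.from_pretrained(tmp_dir)
assert loaded.temperature == 0.7 and loaded.max_new_tokens == 1024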
import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase__ : Any = logging.get_logger(__name__) lowercase__ : Tuple = { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json", } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'xlnet' _snake_case = ['mems'] _snake_case = { 'n_token': 'vocab_size', # Backward compatibility 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , SCREAMING_SNAKE_CASE_=32000 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=24 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=4096 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="bi" , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=-1 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="tanh" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=2 , **SCREAMING_SNAKE_CASE_ , )-> List[str]: '''simple docstring''' __UpperCamelCase = vocab_size __UpperCamelCase = d_model __UpperCamelCase = n_layer __UpperCamelCase = n_head if d_model % n_head != 0: raise ValueError(F"'d_model % n_head' ({d_model % n_head}) should be equal to 0" ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})" ) __UpperCamelCase = d_model // n_head __UpperCamelCase = ff_activation __UpperCamelCase = d_inner __UpperCamelCase = untie_r __UpperCamelCase = attn_type __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = dropout __UpperCamelCase = mem_len __UpperCamelCase = reuse_len __UpperCamelCase = bi_data __UpperCamelCase = clamp_len __UpperCamelCase = same_length __UpperCamelCase = summary_type __UpperCamelCase = summary_use_proj __UpperCamelCase = summary_activation __UpperCamelCase = summary_last_dropout __UpperCamelCase = start_n_top __UpperCamelCase = end_n_top __UpperCamelCase = bos_token_id __UpperCamelCase = pad_token_id __UpperCamelCase = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = kwargs['''use_cache'''] __UpperCamelCase = use_mems_eval __UpperCamelCase = use_mems_train super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) @property def A__ ( self )-> Optional[Any]: '''simple docstring''' logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit." ) return -1 @max_position_embeddings.setter def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' raise NotImplementedError( F"The model {self.model_type} is one of the few models that has no sequence length limit." )
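# A sketch of the divisibility constraint enforced above: d_head is derived as
# d_model // n_head, so mismatched values raise a ValueError.
from transformers import XLNetConfig

config = XLNetConfig(d_model=1024, n_head=16)
print(config.d_head)  # 64
try:
    XLNetConfig(d_model=1000, n_head=16)
except ValueError as err:
    print(err)  # 'd_model % n_head' (8) should be equal to 0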
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() lowercase__ : List[str] = logging.get_logger(__name__) def A_ ( snake_case : Optional[Any] ) -> Dict: '''simple docstring''' __UpperCamelCase = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError('''Quantized models are not supported.''' ) __UpperCamelCase = re.match(r'''^mobilenet_v1_([^_]*)_([^_]*)$''' , snake_case ) if matches: __UpperCamelCase = float(matches[1] ) __UpperCamelCase = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __UpperCamelCase = 1001 __UpperCamelCase = '''imagenet-1k-id2label.json''' __UpperCamelCase = '''huggingface/label-files''' __UpperCamelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __UpperCamelCase = {int(snake_case ) + 1: v for k, v in idalabel.items()} __UpperCamelCase = '''background''' __UpperCamelCase = idalabel __UpperCamelCase = {v: k for k, v in idalabel.items()} return config def A_ ( ) -> int: '''simple docstring''' __UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __UpperCamelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw ) return im @torch.no_grad() def A_ ( snake_case : int , snake_case : Dict , snake_case : List[str] , snake_case : Optional[int]=False ) -> int: '''simple docstring''' __UpperCamelCase = get_mobilenet_va_config(snake_case ) # Load 🤗 model __UpperCamelCase = MobileNetVaForImageClassification(snake_case ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(snake_case , snake_case , snake_case ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __UpperCamelCase = MobileNetVaImageProcessor( crop_size={'''width''': config.image_size, '''height''': config.image_size} , size={'''shortest_edge''': config.image_size + 32} , ) __UpperCamelCase = image_processor(images=prepare_img() , return_tensors='''pt''' ) __UpperCamelCase = model(**snake_case ) __UpperCamelCase = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __UpperCamelCase = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __UpperCamelCase = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __UpperCamelCase = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , snake_case , atol=1e-4 ) Path(snake_case ).mkdir(exist_ok=snake_case ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case ) print(f"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(snake_case ) if push_to_hub: print('''Pushing to the hub...''' ) __UpperCamelCase = '''google/''' + model_name image_processor.push_to_hub(snake_case ) model.push_to_hub(snake_case ) if __name__ == "__main__": lowercase__ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="mobilenet_v1_1.0_224", type=str, help="Name of the MobileNetV1 model you'd like to convert. 
Should be in the form 'mobilenet_v1_<depth>_<size>'.", ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) lowercase__ : Tuple = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
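# Once converted (or using the checkpoints already published under google/),
# the model loads like any other; a sketch assuming Hub access, reusing the
# same COCO test image as the verification step above.
import requests
import torch
from PIL import Image
from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])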
from typing import Any class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' __UpperCamelCase = data __UpperCamelCase = None def __repr__( self )-> str: '''simple docstring''' return F"Node({self.data})" class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self )-> Optional[int]: '''simple docstring''' __UpperCamelCase = None def __iter__( self )-> Any: '''simple docstring''' __UpperCamelCase = self.head while node: yield node.data __UpperCamelCase = node.next def __len__( self )-> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self )-> str: '''simple docstring''' return "->".join([str(SCREAMING_SNAKE_CASE_ ) for item in self] ) def __getitem__( self , SCREAMING_SNAKE_CASE_ )-> Any: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) for i, node in enumerate(self ): if i == index: return node return None def __setitem__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index < len(self ): raise ValueError('''list index out of range.''' ) __UpperCamelCase = self.head for _ in range(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = current.next __UpperCamelCase = data def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(len(self ) , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' self.insert_nth(0 , SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> None: '''simple docstring''' if not 0 <= index <= len(self ): raise IndexError('''list index out of range''' ) __UpperCamelCase = Node(SCREAMING_SNAKE_CASE_ ) if self.head is None: __UpperCamelCase = new_node elif index == 0: __UpperCamelCase = self.head # link new_node to head __UpperCamelCase = new_node else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = new_node def A__ ( self )-> None: # print every node data '''simple docstring''' print(self ) def A__ ( self )-> Any: '''simple docstring''' return self.delete_nth(0 ) def A__ ( self )-> Any: # delete from tail '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def A__ ( self , SCREAMING_SNAKE_CASE_ = 0 )-> Any: '''simple docstring''' if not 0 <= index <= len(self ) - 1: # test if index is valid raise IndexError('''List index out of range.''' ) __UpperCamelCase = self.head # default first node if index == 0: __UpperCamelCase = self.head.next else: __UpperCamelCase = self.head for _ in range(index - 1 ): __UpperCamelCase = temp.next __UpperCamelCase = temp.next __UpperCamelCase = temp.next.next return delete_node.data def A__ ( self )-> bool: '''simple docstring''' return self.head is None def A__ ( self )-> None: '''simple docstring''' __UpperCamelCase = None __UpperCamelCase = self.head while current: # Store the current node's next node. 
__UpperCamelCase = current.next # Make the current node's next point backwards __UpperCamelCase = prev # Make the previous node be the current node __UpperCamelCase = current # Make the current node the next node (to progress iteration) __UpperCamelCase = next_node # Return prev in order to put the head at the end __UpperCamelCase = prev def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = LinkedList() assert linked_list.is_empty() is True assert str(snake_case ) == "" try: linked_list.delete_head() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() raise AssertionError # This should not happen. except IndexError: assert True # This should happen. for i in range(10 ): assert len(snake_case ) == i linked_list.insert_nth(snake_case , i + 1 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 11 ) ) linked_list.insert_head(0 ) linked_list.insert_tail(11 ) assert str(snake_case ) == "->".join(str(snake_case ) for i in range(0 , 12 ) ) assert linked_list.delete_head() == 0 assert linked_list.delete_nth(9 ) == 10 assert linked_list.delete_tail() == 11 assert len(snake_case ) == 9 assert str(snake_case ) == "->".join(str(snake_case ) for i in range(1 , 10 ) ) assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True for i in range(0 , 9 ): __UpperCamelCase = -i assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True linked_list.reverse() assert str(snake_case ) == "->".join(str(snake_case ) for i in range(-8 , 1 ) ) def A_ ( ) -> None: '''simple docstring''' __UpperCamelCase = [ -9, 100, Node(77345112 ), '''dlrow olleH''', 7, 5555, 0, -192.55555, '''Hello, world!''', 77.9, Node(10 ), None, None, 12.20, ] __UpperCamelCase = LinkedList() for i in test_input: linked_list.insert_tail(snake_case ) # Check if it's empty or not assert linked_list.is_empty() is False assert ( str(snake_case ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->" "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the head __UpperCamelCase = linked_list.delete_head() assert result == -9 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None->12.2" ) # Delete the tail __UpperCamelCase = linked_list.delete_tail() assert result == 12.2 assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None->None" ) # Delete a node in specific location in linked list __UpperCamelCase = linked_list.delete_nth(10 ) assert result is None assert ( str(snake_case ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->" "Hello, world!->77.9->Node(10)->None" ) # Add a Node instance to its head linked_list.insert_head(Node('''Hello again, world!''' ) ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None" ) # Add None to its tail linked_list.insert_tail(snake_case ) assert ( str(snake_case ) == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->" "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None" ) # Reverse the linked list linked_list.reverse() assert ( str(snake_case ) == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->" "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)" ) def A_ ( ) -> Any: '''simple docstring''' from doctest import testmod testmod() __UpperCamelCase = LinkedList() 
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() ) linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() ) linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() ) print('''\nPrint list:''' ) linked_list.print_list() print('''\nDelete head''' ) linked_list.delete_head() print('''Delete tail''' ) linked_list.delete_tail() print('''\nPrint list:''' ) linked_list.print_list() print('''\nReverse linked list''' ) linked_list.reverse() print('''\nPrint list:''' ) linked_list.print_list() print('''\nString representation of linked list:''' ) print(snake_case ) print('''\nReading/changing Node data using indexing:''' ) print(f"Element at Position 1: {linked_list[1]}" ) __UpperCamelCase = input('''Enter New Value: ''' ).strip() print('''New list:''' ) print(snake_case ) print(f"length of linked_list is : {len(snake_case )}" ) if __name__ == "__main__": main()
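# The test helpers above refer to the classes by their original names Node and
# LinkedList; under that assumption, typical usage of the list looks like this.
linked_list = LinkedList()
for value in (1, 2, 3):
    linked_list.insert_tail(value)
linked_list.insert_head(0)
assert str(linked_list) == "0->1->2->3"
assert linked_list.delete_head() == 0
linked_list.reverse()
assert str(linked_list) == "3->2->1"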
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def A_ ( snake_case : Dict , snake_case : Tuple , snake_case : List[Any] , snake_case : Any ) -> int: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})" def A_ ( snake_case : Optional[Any] , snake_case : int , snake_case : List[Any] , snake_case : str , snake_case : List[Any]=True ) -> Optional[Any]: '''simple docstring''' model.train() __UpperCamelCase = model(snake_case ) __UpperCamelCase = F.mse_loss(snake_case , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(snake_case ) def A_ ( snake_case : Optional[Any] , snake_case : str=False ) -> Tuple: '''simple docstring''' set_seed(42 ) __UpperCamelCase = RegressionModel() __UpperCamelCase = deepcopy(snake_case ) __UpperCamelCase = RegressionDataset(length=80 ) __UpperCamelCase = DataLoader(snake_case , batch_size=16 ) model.to(accelerator.device ) if sched: __UpperCamelCase = AdamW(params=model.parameters() , lr=1e-3 ) __UpperCamelCase = AdamW(params=ddp_model.parameters() , lr=1e-3 ) __UpperCamelCase = LambdaLR(snake_case , lr_lambda=lambda snake_case : epoch**0.65 ) __UpperCamelCase = LambdaLR(snake_case , lr_lambda=lambda snake_case : epoch**0.65 ) # Make a copy of `model` if sched: __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = accelerator.prepare(snake_case , snake_case , snake_case , snake_case ) else: __UpperCamelCase , __UpperCamelCase = accelerator.prepare(snake_case , snake_case ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def A_ ( snake_case : str ) -> List[str]: '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_training_setup(snake_case ) # Use a single batch __UpperCamelCase , __UpperCamelCase = next(iter(snake_case ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCamelCase , __UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCamelCase , __UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case , snake_case , snake_case , snake_case ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) else: # Sync grads step_model(snake_case , snake_case , snake_case , snake_case ) # Since `no_sync` is a noop, `ddp_model` and `model` grads 
should always be in sync check_model_parameters(snake_case , snake_case , snake_case , snake_case ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __UpperCamelCase = ddp_input[torch.randperm(len(snake_case ) )] def A_ ( snake_case : str ) -> Any: '''simple docstring''' __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_training_setup(snake_case ) # Use a single batch __UpperCamelCase , __UpperCamelCase = next(iter(snake_case ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model __UpperCamelCase , __UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCamelCase , __UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case , snake_case , snake_case , snake_case ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) else: # Sync grads step_model(snake_case , snake_case , snake_case , snake_case ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __UpperCamelCase = ddp_input[torch.randperm(len(snake_case ) )] def A_ ( snake_case : Optional[int]=False , snake_case : Tuple=False ) -> Tuple: '''simple docstring''' __UpperCamelCase = Accelerator( split_batches=snake_case , dispatch_batches=snake_case , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_training_setup(snake_case ) for iteration, batch in enumerate(snake_case ): __UpperCamelCase , __UpperCamelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCamelCase , __UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCamelCase , __UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(snake_case , snake_case , snake_case , snake_case , snake_case ) # Do "gradient accumulation" (noop) with accelerator.accumulate(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"Gradients not in sync when they should be at iteration 
{iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})" # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) __UpperCamelCase = ddp_input[torch.randperm(len(snake_case ) )] GradientState._reset_state() def A_ ( snake_case : str=False , snake_case : Tuple=False ) -> List[Any]: '''simple docstring''' __UpperCamelCase = Accelerator( split_batches=snake_case , dispatch_batches=snake_case , gradient_accumulation_steps=2 ) # Test that context manager behaves properly __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = get_training_setup(snake_case , snake_case ) for iteration, batch in enumerate(snake_case ): __UpperCamelCase , __UpperCamelCase = batch.values() # Gather the distributed inputs and targs for the base model __UpperCamelCase , __UpperCamelCase = accelerator.gather((ddp_input, ddp_target) ) __UpperCamelCase , __UpperCamelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(snake_case , snake_case , snake_case , snake_case , snake_case ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(snake_case ): step_model(snake_case , snake_case , snake_case , snake_case ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n" __UpperCamelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case )) if accelerator.num_processes > 1: check_model_parameters(snake_case , snake_case , snake_case , snake_case ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def A_ ( ) -> int: '''simple docstring''' __UpperCamelCase = Accelerator() __UpperCamelCase = RegressionDataset(length=80 ) __UpperCamelCase = DataLoader(snake_case , batch_size=16 ) __UpperCamelCase = RegressionDataset(length=96 ) __UpperCamelCase = DataLoader(snake_case , batch_size=16 ) __UpperCamelCase , __UpperCamelCase = accelerator.prepare(snake_case , snake_case ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(snake_case ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case ) if iteration < len(snake_case ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(snake_case ): assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case ) if batch_num < len(snake_case ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def A_ ( ) -> List[Any]: '''simple docstring''' __UpperCamelCase = Accelerator() __UpperCamelCase = accelerator.state if 
state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(snake_case ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(snake_case ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation(snake_case , snake_case ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , ) test_gradient_accumulation_with_opt_and_scheduler(snake_case , snake_case ) def A_ ( snake_case : Tuple ) -> int: '''simple docstring''' main() if __name__ == "__main__": main()
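# Stripped of the assertions, the pattern under test is just the accumulate()
# context manager; a sketch on a toy regression model with synthetic data.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 1), torch.randn(32, 1))
model, optimizer, dataloader = accelerator.prepare(
    model, optimizer, DataLoader(dataset, batch_size=4)
)
for inputs, targets in dataloader:
    with accelerator.accumulate(model):  # gradient sync is skipped between steps
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()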
import math


def perfect_square(num: int) -> bool:
    # Float-based check: True when sqrt(num) * sqrt(num) equals num exactly.
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    # Binary search over [0, n] for an integer whose square is n.
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
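# For large integers the float sqrt check above can lose precision; math.isqrt
# (Python 3.8+) gives an exact alternative. A sketch, not part of the original.
import math

def perfect_square_isqrt(num: int) -> bool:
    if num < 0:
        return False
    root = math.isqrt(num)
    return root * root == num

assert perfect_square_isqrt(10**30)        # (10**15) ** 2
assert not perfect_square_isqrt(10**30 + 1)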
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase__ : Optional[int] = datasets.utils.logging.get_logger(__name__) lowercase__ : Optional[Any] = ["names", "prefix"] lowercase__ : List[Any] = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] lowercase__ : Optional[Any] = ["encoding_errors", "on_bad_lines"] lowercase__ : List[str] = ["date_format"] @dataclass class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ): """simple docstring""" _snake_case = "," _snake_case = None _snake_case = "infer" _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = False _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = True _snake_case = False _snake_case = True _snake_case = None _snake_case = "." _snake_case = None _snake_case = '"' _snake_case = 0 _snake_case = None _snake_case = None _snake_case = None _snake_case = None _snake_case = True _snake_case = True _snake_case = 0 _snake_case = True _snake_case = False _snake_case = None _snake_case = 10000 _snake_case = None _snake_case = "strict" _snake_case = "error" _snake_case = None def A__ ( self )-> Any: '''simple docstring''' if self.delimiter is not None: __UpperCamelCase = self.delimiter if self.column_names is not None: __UpperCamelCase = self.column_names @property def A__ ( self )-> Any: '''simple docstring''' __UpperCamelCase = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not 
(datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ): """simple docstring""" _snake_case = CsvConfig def A__ ( self )-> Any: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def A__ ( self , SCREAMING_SNAKE_CASE_ )-> Optional[int]: '''simple docstring''' if not self.config.data_files: raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" ) __UpperCamelCase = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ): __UpperCamelCase = data_files if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = [files] __UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __UpperCamelCase = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = [files] __UpperCamelCase = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) ) return splits def A__ ( self , SCREAMING_SNAKE_CASE_ )-> pa.Table: '''simple docstring''' if self.config.features is not None: __UpperCamelCase = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE_ ) for feature in self.config.features.values() ): # cheaper cast __UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example __UpperCamelCase = table_cast(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return pa_table def A__ ( self , SCREAMING_SNAKE_CASE_ )-> str: '''simple docstring''' __UpperCamelCase = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str __UpperCamelCase = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE_ ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ): __UpperCamelCase = pd.read_csv(SCREAMING_SNAKE_CASE_ , iterator=SCREAMING_SNAKE_CASE_ , dtype=SCREAMING_SNAKE_CASE_ , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE_ ): __UpperCamelCase = pa.Table.from_pandas(SCREAMING_SNAKE_CASE_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ ) except ValueError as e: logger.error(F"Failed to read file '{file}' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}" ) raise
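# From the user side this builder is reached through load_dataset, and the
# CsvConfig fields above surface as keyword arguments. A sketch; "train.csv"
# is a hypothetical local file.
from datasets import load_dataset

dataset = load_dataset(
    "csv",
    data_files={"train": "train.csv"},
    sep=",",
    quotechar='"',
    skiprows=0,
)
print(dataset["train"].features)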
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in non-increasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        # Rows are non-increasing top to bottom, so the negative region can only
        # grow; the previous bound caps the search range for the next row.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
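# A small worked check for the three counters above, on the 4x4 example grid
# (8 negative entries); all strategies must agree.
demo_grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
validate_grid(demo_grid)
assert count_negatives_binary_search(demo_grid) == 8
assert count_negatives_brute_force(demo_grid) == 8
assert count_negatives_brute_force_with_break(demo_grid) == 8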
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so the aligned schema is set via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
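# A hedged usage sketch for the task template above: align_with_features swaps
# the placeholder ClassLabel in label_schema for the dataset's concrete labels.
# The label names below are illustrative only.
from datasets import Audio, ClassLabel, Features

features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
template = AudioClassification(audio_column="audio", label_column="labels")
aligned = template.align_with_features(features)
print(aligned.label_schema["labels"])  # ClassLabel(names=['dog', 'cat'])
print(aligned.column_mapping)          # {'audio': 'audio', 'labels': 'labels'}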
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase__ : str = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 0.9 , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 / 255 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> None: '''simple docstring''' super().__init__(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = size if size is not None else {'''shortest_edge''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = do_resize __UpperCamelCase = size __UpperCamelCase = crop_pct __UpperCamelCase = resample __UpperCamelCase = do_center_crop __UpperCamelCase = crop_size __UpperCamelCase = do_rescale __UpperCamelCase = rescale_factor __UpperCamelCase = do_normalize __UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(F"size must contain 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}" ) if crop_pct is not None: if "shortest_edge" in size: __UpperCamelCase = int(size['''shortest_edge'''] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __UpperCamelCase = int(size['''height'''] / crop_pct ) else: __UpperCamelCase = (int(size['''height'''] / crop_pct ), int(size['''width'''] / crop_pct )) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) else: if "shortest_edge" in size: __UpperCamelCase = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ ) elif "height" in size and "width" in size: __UpperCamelCase = (size['''height'''], size['''width''']) else: raise ValueError('''Invalid size for resize: {}'''.format(SCREAMING_SNAKE_CASE_ ) ) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"size must contain 'height' and 'width' as keys. Got {size.keys()}" ) return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> str: '''simple docstring''' return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , )-> np.ndarray: '''simple docstring''' return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , )-> PIL.Image.Image: '''simple docstring''' __UpperCamelCase = do_resize if do_resize is not None else self.do_resize __UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct __UpperCamelCase = resample if resample is not None else self.resample __UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase = image_mean if image_mean is not None else self.image_mean __UpperCamelCase = image_std if image_std is not None else self.image_std __UpperCamelCase = size if size is not None 
else self.size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = crop_size if crop_size is not None else self.crop_size __UpperCamelCase = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='''crop_size''' ) __UpperCamelCase = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_center_crop and crop_pct is None: raise ValueError('''Crop_pct must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. __UpperCamelCase = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if do_resize: __UpperCamelCase = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , crop_pct=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_center_crop: __UpperCamelCase = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: __UpperCamelCase = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: __UpperCamelCase = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] __UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
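# A hedged usage sketch: the anonymized processor above matches the shape of
# transformers' PoolFormerImageProcessor (crop_pct + shortest-edge resize, then
# a 224x224 center crop), so that public class is assumed here to illustrate
# the call. The random array stands in for a real image.
import numpy as np
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor()
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
inputs = processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # expected (1, 3, 224, 224)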
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ : Tuple = logging.get_logger(__name__) lowercase__ : Dict = { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json", "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json", "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json" ), "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json", "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json", "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json", "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json", "cl-tohoku/bert-base-japanese-whole-word-masking": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json" ), "cl-tohoku/bert-base-japanese-char": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json" ), "cl-tohoku/bert-base-japanese-char-whole-word-masking": ( "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json" ), "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json", # See all BERT models at https://huggingface.co/models?filter=bert } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = 'bert' def __init__( self , SCREAMING_SNAKE_CASE_=30522 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , 
**SCREAMING_SNAKE_CASE_ , )-> Optional[Any]: '''simple docstring''' super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = vocab_size __UpperCamelCase = hidden_size __UpperCamelCase = num_hidden_layers __UpperCamelCase = num_attention_heads __UpperCamelCase = hidden_act __UpperCamelCase = intermediate_size __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = max_position_embeddings __UpperCamelCase = type_vocab_size __UpperCamelCase = initializer_range __UpperCamelCase = layer_norm_eps __UpperCamelCase = position_embedding_type __UpperCamelCase = use_cache __UpperCamelCase = classifier_dropout class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" @property def A__ ( self )-> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __UpperCamelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __UpperCamelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
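# A short sketch of instantiating the config above. The anonymized class mirrors
# transformers' BertConfig, so the public name is used here for illustration;
# every hyperparameter is an ordinary keyword argument.
from transformers import BertConfig

config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, intermediate_size=1024)
print(config.model_type)   # "bert"
print(config.hidden_size)  # 256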
import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params lowercase__ : Any = getLogger(__name__) lowercase__ : List[str] = "cuda" if torch.cuda.is_available() else "cpu" def A_ ( snake_case : List[str] , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : str = DEFAULT_DEVICE , snake_case : List[str]=False , snake_case : Union[str, Any]="summarization" , snake_case : str=None , **snake_case : List[Any] , ) -> Dict: '''simple docstring''' __UpperCamelCase = Path(snake_case ).open('''w''' , encoding='''utf-8''' ) __UpperCamelCase = str(snake_case ) __UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).to(snake_case ) if fpaa: __UpperCamelCase = model.half() __UpperCamelCase = AutoTokenizer.from_pretrained(snake_case ) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}" ) # if this is wrong, check config.model_type. __UpperCamelCase = time.time() # update config with task specific params use_task_specific_params(snake_case , snake_case ) if prefix is None: __UpperCamelCase = prefix or getattr(model.config , '''prefix''' , '''''' ) or '''''' for examples_chunk in tqdm(list(chunks(snake_case , snake_case ) ) ): __UpperCamelCase = [prefix + text for text in examples_chunk] __UpperCamelCase = tokenizer(snake_case , return_tensors='''pt''' , truncation=snake_case , padding='''longest''' ).to(snake_case ) __UpperCamelCase = model.generate( input_ids=batch.input_ids , attention_mask=batch.attention_mask , **snake_case , ) __UpperCamelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case ) for hypothesis in dec: fout.write(hypothesis + '''\n''' ) fout.flush() fout.close() __UpperCamelCase = int(time.time() - start_time ) # seconds __UpperCamelCase = len(snake_case ) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )} def A_ ( ) -> Tuple: '''simple docstring''' return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' ) def A_ ( snake_case : str=True ) -> int: '''simple docstring''' __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''model_name''' , type=snake_case , help='''like facebook/bart-large-cnn,t5-base, etc.''' ) parser.add_argument('''input_path''' , type=snake_case , help='''like cnn_dm/test.source''' ) parser.add_argument('''save_path''' , type=snake_case , help='''where to save summaries''' ) parser.add_argument('''--reference_path''' , type=snake_case , required=snake_case , help='''like cnn_dm/test.target''' ) parser.add_argument('''--score_path''' , type=snake_case , required=snake_case , default='''metrics.json''' , help='''where to save metrics''' ) parser.add_argument('''--device''' , type=snake_case , required=snake_case , default=snake_case , help='''cuda, cuda:1, cpu etc.''' ) parser.add_argument( '''--prefix''' , type=snake_case , required=snake_case , default=snake_case , help='''will be added to the begininng of src examples''' ) parser.add_argument('''--task''' , type=snake_case , default='''summarization''' , help='''used for task_specific_params + metrics''' ) parser.add_argument('''--bs''' , type=snake_case , default=8 , required=snake_case , help='''batch size''' ) parser.add_argument( 
'''--n_obs''' , type=snake_case , default=-1 , required=snake_case , help='''How many observations. Defaults to all.''' ) parser.add_argument('''--fp16''' , action='''store_true''' ) parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' ) parser.add_argument( '''--info''' , nargs='''?''' , type=snake_case , const=datetime_now() , help=( '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.''' ''' lang=en-ru. If no value is passed, the current datetime string will be used.''' ) , ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate __UpperCamelCase , __UpperCamelCase = parser.parse_known_args() __UpperCamelCase = parse_numeric_n_bool_cl_kwargs(snake_case ) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}" ) __UpperCamelCase = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()] if args.n_obs > 0: __UpperCamelCase = examples[: args.n_obs] Path(args.save_path ).parent.mkdir(exist_ok=snake_case ) if args.reference_path is None and Path(args.score_path ).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c." ) if args.device == "cpu" and args.fpaa: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError('''Can\'t mix --fp16 and --device cpu''' ) __UpperCamelCase = generate_summaries_or_translations( snake_case , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **snake_case , ) if args.reference_path is None: return {} # Compute scores __UpperCamelCase = calculate_bleu if '''translation''' in args.task else calculate_rouge __UpperCamelCase = [x.rstrip() for x in open(args.save_path ).readlines()] __UpperCamelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(snake_case )] __UpperCamelCase = score_fn(snake_case , snake_case ) scores.update(snake_case ) if args.dump_args: scores.update(snake_case ) if args.info: __UpperCamelCase = args.info if verbose: print(snake_case ) if args.score_path is not None: json.dump(snake_case , open(args.score_path , '''w''' ) ) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
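# Example invocation for summarization, matching the CLI defined above; the
# data paths are placeholders:
#
#   python run_eval.py facebook/bart-large-cnn \
#       cnn_dm/test.source /tmp/test_generations.txt \
#       --reference_path cnn_dm/test.target \
#       --score_path /tmp/rouge.json \
#       --task summarization --bs 16 --fp16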
def longest_common_substring(text1: str, text2: str) -> str:
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] is the length of the common substring ending at text1[i-1], text2[j-1].
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
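# Two quick checks for longest_common_substring: the DP table tracks the length
# of the common run ending at each index pair, and the answer is read back from
# the position where that length peaks.
print(longest_common_substring("abcdxyz", "xyzabcd"))      # abcd
print(longest_common_substring("zxabcdezy", "yzabcdezx"))  # abcdez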
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
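# Sanity check for the solution above: 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27
print(solution())  # 648 for 100!, the published Project Euler 20 answer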
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} lowercase__ : Optional[int] = { "vocab_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json" ), }, "merges_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt", "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt" ), }, "tokenizer_file": { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json", "roberta-base-openai-detector": ( "https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json" ), "roberta-large-openai-detector": ( "https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json" ), }, } lowercase__ : Optional[Any] = { "roberta-base": 5_1_2, "roberta-large": 5_1_2, "roberta-large-mnli": 5_1_2, "distilroberta-base": 5_1_2, "roberta-base-openai-detector": 5_1_2, "roberta-large-openai-detector": 5_1_2, } class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" _snake_case = VOCAB_FILES_NAMES _snake_case = PRETRAINED_VOCAB_FILES_MAP _snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case = ['input_ids', 'attention_mask'] _snake_case = RobertaTokenizer def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_="replace" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="</s>" , SCREAMING_SNAKE_CASE_="<s>" , SCREAMING_SNAKE_CASE_="<unk>" , SCREAMING_SNAKE_CASE_="<pad>" , SCREAMING_SNAKE_CASE_="<mask>" , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , )-> List[Any]: '''simple docstring''' super().__init__( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , errors=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , 
cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , add_prefix_space=SCREAMING_SNAKE_CASE_ , trim_offsets=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) __UpperCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: __UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , pre_tok_state.pop('''type''' ) ) __UpperCamelCase = add_prefix_space __UpperCamelCase = pre_tok_class(**SCREAMING_SNAKE_CASE_ ) __UpperCamelCase = add_prefix_space __UpperCamelCase = '''post_processor''' __UpperCamelCase = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if tokenizer_component_instance: __UpperCamelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __UpperCamelCase = tuple(state['''sep'''] ) if "cls" in state: __UpperCamelCase = tuple(state['''cls'''] ) __UpperCamelCase = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE_ ) != add_prefix_space: __UpperCamelCase = add_prefix_space __UpperCamelCase = True if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE_ ) != trim_offsets: __UpperCamelCase = trim_offsets __UpperCamelCase = True if changes_to_apply: __UpperCamelCase = getattr(SCREAMING_SNAKE_CASE_ , state.pop('''type''' ) ) __UpperCamelCase = component_class(**SCREAMING_SNAKE_CASE_ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @property def A__ ( self )-> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def A__ ( self , SCREAMING_SNAKE_CASE_ )-> List[str]: '''simple docstring''' __UpperCamelCase = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else value __UpperCamelCase = value def A__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> BatchEncoding: '''simple docstring''' __UpperCamelCase = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )-> BatchEncoding: '''simple docstring''' __UpperCamelCase = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE_ ) assert self.add_prefix_space or not is_split_into_words, ( F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " "to use it with pretokenized inputs." 
) return super()._encode_plus(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> Tuple[str]: '''simple docstring''' __UpperCamelCase = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_ ) return tuple(SCREAMING_SNAKE_CASE_ ) def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None )-> Optional[Any]: '''simple docstring''' __UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None )-> List[int]: '''simple docstring''' __UpperCamelCase = [self.sep_token_id] __UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
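# Usage sketch: the anonymized class above follows the RobertaTokenizerFast API
# from transformers, shown here with the public checkpoint name.
from transformers import RobertaTokenizerFast

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
enc = tokenizer("Hello world", return_tensors="pt")
print(enc["input_ids"])      # starts with bos id 0 and ends with eos id 2
print(tokenizer.mask_token)  # "<mask>"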
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion point within the sorted prefix.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to make room, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
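# Quick checks: the binary search only reduces the number of comparisons per
# insertion; element shifting keeps the overall worst case at O(n^2).
assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([-1, -5, 0, 3]) == [-5, -1, 0, 3]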
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer division, not n / 2
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

# naive exponentiation: same identity, but b ** (p - 2) builds a huge intermediate
print((a / b) % p == (a * b ** (p - 2)) % p)
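# Exact integer check of the identity exercised above: for prime p, b^(p-2) mod p
# is the modular inverse of b (Fermat's little theorem), so b * inv == 1 (mod p).
# This avoids the float division in the prints above, which can lose precision.
inv = binary_exponentiation(b, p - 2, p)
assert (b * inv) % p == 1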
from __future__ import annotations

from collections import deque


class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )
        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
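# Example run of the automaton above: build once from the keyword list, then
# report every occurrence of every keyword in a single left-to-right scan.
A = Automaton(["what", "hat", "ver", "er"])
print(A.search_in("whatever, err ... , wherever"))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}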