code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class __SCREAMING_SNAKE_CASE ( A__ ): A : bool = field(default=A__ , metadata={'help': 'Whether to use SortishSampler or not.'} ) A : bool = field( default=A__ , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) A : Optional[int] = field( default=A__ , metadata={ 'help': ( 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `max_length` value of the model configuration.' ) } , ) A : Optional[int] = field( default=A__ , metadata={ 'help': ( 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `num_beams` value of the model configuration.' ) } , ) A : Optional[Union[str, Path, GenerationConfig]] = field( default=A__ , metadata={ 'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.' } , ) def __lowerCamelCase ( self ): lowercase : List[str] = super().to_dict() for k, v in d.items(): if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = v.to_dict() return d
337
import logging import os from .state import PartialState class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ): if self._should_log(SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) elif in_order: lowercase : List[Any] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) state.wait_for_everyone() def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]: """simple docstring""" if log_level is None: lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase ) lowercase : str = logging.getLogger(_UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_UpperCamelCase, {} )
337
1
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" if index == number_of_items: return 0 lowercase : Dict = 0 lowercase : Union[str, Any] = 0 lowercase : Optional[int] = knapsack(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, index + 1 ) if weights[index] <= max_weight: lowercase : str = values[index] + knapsack( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, max_weight - weights[index], index + 1 ) return max(_UpperCamelCase, _UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class __SCREAMING_SNAKE_CASE ( pl.LightningModule ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Any = model lowercase : Optional[Any] = 2 lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __lowerCamelCase ( self ): pass def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase ) lowercase : int = LightningModel(_UpperCamelCase ) lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
337
1
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FocalNetForImageClassification''', '''FocalNetForMaskedImageModeling''', '''FocalNetBackbone''', '''FocalNetModel''', '''FocalNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __SCREAMING_SNAKE_CASE ( A__ ): A : Any = 'yolos' def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = hidden_size lowercase : int = num_hidden_layers lowercase : str = num_attention_heads lowercase : str = intermediate_size lowercase : Dict = hidden_act lowercase : int = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[Any] = initializer_range lowercase : Optional[int] = layer_norm_eps lowercase : str = image_size lowercase : Dict = patch_size lowercase : str = num_channels lowercase : Optional[int] = qkv_bias lowercase : List[str] = num_detection_tokens lowercase : List[str] = use_mid_position_embeddings lowercase : Dict = auxiliary_loss # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : Any = bbox_cost lowercase : int = giou_cost # Loss coefficients lowercase : Dict = bbox_loss_coefficient lowercase : Optional[Any] = giou_loss_coefficient lowercase : Tuple = eos_coefficient class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return 12
337
1
def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[Any] = len(_UpperCamelCase ) lowercase : Dict = sum(_UpperCamelCase ) lowercase : int = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1, n + 1 ): lowercase : Any = True for i in range(1, s + 1 ): lowercase : str = False for i in range(1, n + 1 ): for j in range(1, s + 1 ): lowercase : Any = dp[i][j - 1] if arr[i - 1] <= j: lowercase : int = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ), -1, -1 ): if dp[n][j] is True: lowercase : List[Any] = s - 2 * j break return diff
337
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __a = { '''<''': operator.lt, '''<=''': operator.le, '''==''': operator.eq, '''!=''': operator.ne, '''>=''': operator.ge, '''>''': operator.gt, } def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider""" f""" reinstalling {pkg}.""" ) if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ): raise ImportError( f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None: """simple docstring""" lowercase : List[Any] = f"""\n{hint}""" if hint is not None else '''''' # non-versioned check if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ): lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None else: lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f""" got {requirement}""" ) lowercase , lowercase : str = match[0] lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements lowercase : List[Any] = {} for w in want_range: lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f""" but got {requirement}""" ) lowercase , lowercase : Optional[int] = match[0] lowercase : Dict = want_ver if op not in ops: raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" ) # special case if pkg == "python": lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return # check if any version is installed try: lowercase : List[str] = importlib.metadata.version(_UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(_UpperCamelCase, _UpperCamelCase )
337
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) lowercase : Optional[int] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) lowercase : Union[str, Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) lowercase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : List[Any] = self.dummy_uncond_unet lowercase : Optional[int] = DDIMScheduler() lowercase : Optional[int] = self.dummy_vq_model lowercase : Optional[Any] = LDMPipeline(unet=SCREAMING_SNAKE_CASE__ , vqvae=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) ldm.to(SCREAMING_SNAKE_CASE__ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = torch.manual_seed(0 ) lowercase : Dict = ldm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='''numpy''' ).images lowercase : Union[str, Any] = torch.manual_seed(0 ) lowercase : List[str] = ldm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=2 , output_type='''numpy''' , return_dict=SCREAMING_SNAKE_CASE__ )[0] lowercase : str = image[0, -3:, -3:, -1] lowercase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase : Union[str, Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] ) lowercase : Tuple = 1E-2 if torch_device != '''mps''' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance @slow @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : Optional[Any] = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' ) ldm.to(SCREAMING_SNAKE_CASE__ ) ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = torch.manual_seed(0 ) lowercase : Tuple = ldm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , output_type='''numpy''' ).images lowercase : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowercase : Any = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] ) lowercase : Union[str, Any] = 1E-2 if torch_device != '''mps''' else 3E-2 assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
337
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a = logging.get_logger(__name__) __a = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'deta' A : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = backbone_config.pop('''model_type''' ) lowercase : Any = CONFIG_MAPPING[backbone_model_type] lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = backbone_config lowercase : Union[str, Any] = num_queries lowercase : Any = max_position_embeddings lowercase : int = d_model lowercase : Any = encoder_ffn_dim lowercase : Optional[int] = encoder_layers lowercase : Tuple = encoder_attention_heads lowercase : Optional[Any] = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : int = decoder_attention_heads lowercase : Any = dropout lowercase : int = attention_dropout lowercase : Dict = activation_dropout lowercase : int = activation_function lowercase : Dict = init_std lowercase : List[str] = init_xavier_std lowercase : Optional[Any] = encoder_layerdrop lowercase : Tuple = auxiliary_loss lowercase : Tuple = position_embedding_type # deformable attributes lowercase : List[str] = num_feature_levels lowercase : Tuple = encoder_n_points lowercase : Optional[int] = decoder_n_points lowercase : Tuple = two_stage lowercase : Optional[Any] = two_stage_num_proposals lowercase : Union[str, Any] = with_box_refine lowercase : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : str = bbox_cost lowercase : List[Any] = giou_cost # Loss coefficients lowercase : Tuple = mask_loss_coefficient lowercase : Any = dice_loss_coefficient lowercase : Dict = bbox_loss_coefficient lowercase : Tuple = giou_loss_coefficient lowercase : Union[str, Any] = eos_coefficient lowercase : Tuple = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return self.encoder_attention_heads @property def __lowerCamelCase ( self ): return self.d_model def __lowerCamelCase ( self ): lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Any = self.backbone_config.to_dict() lowercase : List[str] = self.__class__.model_type return output
337
1
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() __a = logging.get_logger('''transformers.models.speecht5''') def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" hf_model.apply_weight_norm() lowercase : str = checkpoint['''input_conv.weight_g'''] lowercase : Optional[Any] = checkpoint['''input_conv.weight_v'''] lowercase : Tuple = checkpoint['''input_conv.bias'''] for i in range(len(config.upsample_rates ) ): lowercase : List[Any] = checkpoint[f"""upsamples.{i}.1.weight_g"""] lowercase : Dict = checkpoint[f"""upsamples.{i}.1.weight_v"""] lowercase : Any = checkpoint[f"""upsamples.{i}.1.bias"""] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): lowercase : Dict = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_g"""] lowercase : Dict = checkpoint[f"""blocks.{i}.convs1.{j}.1.weight_v"""] lowercase : List[str] = checkpoint[f"""blocks.{i}.convs1.{j}.1.bias"""] lowercase : int = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_g"""] lowercase : str = checkpoint[f"""blocks.{i}.convs2.{j}.1.weight_v"""] lowercase : str = checkpoint[f"""blocks.{i}.convs2.{j}.1.bias"""] lowercase : Dict = checkpoint['''output_conv.1.weight_g'''] lowercase : List[str] = checkpoint['''output_conv.1.weight_v'''] lowercase : str = checkpoint['''output_conv.1.bias'''] hf_model.remove_weight_norm() @torch.no_grad() def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None, _UpperCamelCase=None, ) ->Tuple: """simple docstring""" if config_path is not None: lowercase : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(_UpperCamelCase ) else: lowercase : Optional[int] = SpeechTaHifiGanConfig() lowercase : Optional[int] = SpeechTaHifiGan(_UpperCamelCase ) lowercase : List[Any] = torch.load(_UpperCamelCase ) load_weights(orig_checkpoint['''model''']['''generator'''], _UpperCamelCase, _UpperCamelCase ) lowercase : Optional[Any] = np.load(_UpperCamelCase ) lowercase : List[str] = stats[0].reshape(-1 ) lowercase : List[Any] = stats[1].reshape(-1 ) lowercase : Dict = torch.from_numpy(_UpperCamelCase ).float() lowercase : Union[str, Any] = torch.from_numpy(_UpperCamelCase ).float() model.save_pretrained(_UpperCamelCase ) if repo_id: print('''Pushing to the hub...''' ) model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) __a = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
337
def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : Union[str, Any] = 0 for i in range(1, 1001 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
337
1
from collections.abc import Callable class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ = None ): # Stores actual heap items. lowercase : list = [] # Stores indexes of each item for supporting updates and deletion. lowercase : dict = {} # Stores current size of heap. lowercase : str = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return int((i - 1) / 2 ) if i > 0 else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : Dict = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. lowercase , lowercase : int = self.arr[j], self.arr[i] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.arr[i][1] < self.arr[j][1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = self._left(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = i if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = left if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = right return valid_parent def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ ) while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) while valid_parent != index: self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : str = self.pos_map[item] lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : List[str] = self.pos_map[item] del self.pos_map[item] lowercase : Optional[int] = self.arr[self.size - 1] lowercase : int = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] ) else: lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )] lowercase : str = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowerCamelCase ( self ): return self.arr[0] if self.size else None def __lowerCamelCase ( self ): lowercase : str = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __lowercase ( ) ->None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
337
import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __a = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) lowercase : Any = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCamelCase ( self ): lowercase : List[Any] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with a really long name lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
337
1
def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : Union[str, Any] = 0 for i in range(1, 1001 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
337
import math class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1 lowercase : List[Any] = n lowercase : List[Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # adjacency matrix for weight lowercase : Union[str, Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # dp[i][j] stores minimum distance from i to j def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = w def __lowerCamelCase ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
337
1
def __lowercase ( _UpperCamelCase ) ->str: """simple docstring""" lowercase : Union[str, Any] = int(_UpperCamelCase ) if decimal in (0, 1): # Exit cases for the recursion return str(_UpperCamelCase ) lowercase , lowercase : Optional[int] = divmod(_UpperCamelCase, 2 ) return binary_recursive(_UpperCamelCase ) + str(_UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->str: """simple docstring""" lowercase : str = str(_UpperCamelCase ).strip() if not number: raise ValueError('''No input value was provided''' ) lowercase : List[Any] = '''-''' if number.startswith('''-''' ) else '''''' lowercase : List[Any] = number.lstrip('''-''' ) if not number.isnumeric(): raise ValueError('''Input value is not an integer''' ) return f"""{negative}0b{binary_recursive(int(_UpperCamelCase ) )}""" if __name__ == "__main__": from doctest import testmod testmod()
337
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(_UpperCamelCase ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
1
from __future__ import annotations __a = [] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" for i in range(len(_UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" if row >= len(_UpperCamelCase ): solution.append(_UpperCamelCase ) printboard(_UpperCamelCase ) print() return True for i in range(len(_UpperCamelCase ) ): if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : int = 1 solve(_UpperCamelCase, row + 1 ) lowercase : Tuple = 0 return False def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" for i in range(len(_UpperCamelCase ) ): for j in range(len(_UpperCamelCase ) ): if board[i][j] == 1: print('''Q''', end=''' ''' ) else: print('''.''', end=''' ''' ) print() # n=int(input("The no. of queens")) __a = 8 __a = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
337
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
337
1
from __future__ import annotations def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->list[list[str]]: """simple docstring""" lowercase : Dict = word_bank or [] # create a table lowercase : int = len(_UpperCamelCase ) + 1 lowercase : list[list[list[str]]] = [] for _ in range(_UpperCamelCase ): table.append([] ) # seed value lowercase : List[Any] = [[]] # because empty string has empty combination # iterate through the indices for i in range(_UpperCamelCase ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(_UpperCamelCase )] == word: lowercase : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(_UpperCamelCase )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(_UpperCamelCase )]: combination.reverse() return table[len(_UpperCamelCase )] if __name__ == "__main__": print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa'''])) print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t'''])) print( all_construct( '''hexagonosaurus''', ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''], ) )
337
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase ) ->List[int]: """simple docstring""" if isinstance(_UpperCamelCase, np.ndarray ): return list(tensor.shape ) lowercase : Optional[Any] = tf.shape(_UpperCamelCase ) if tensor.shape == tf.TensorShape(_UpperCamelCase ): return dynamic lowercase : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )] def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase : int = [1] * inputs.shape.rank lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis] lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase : List[str] = tf.nn.batch_normalization( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, ) return outputs def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase : Dict = tf.shape(_UpperCamelCase ) lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->tf.Tensor: """simple docstring""" if not isinstance(_UpperCamelCase, tf.Tensor ): lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase : str = ( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None: """simple docstring""" tf.debugging.assert_less( _UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ), ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowercase : Any = np.asarray(_UpperCamelCase ) lowercase : List[Any] = 1 lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCamelCase ): lowercase : Optional[int] = chunk_data else: lowercase : int = data def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" if name in group.attrs: lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]] else: lowercase : Optional[Any] = [] lowercase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" def _expand_single_ad_tensor(_UpperCamelCase ): if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCamelCase, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
337
1
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->float: """simple docstring""" if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowercase : Union[str, Any] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_UpperCamelCase ) ) return round(_UpperCamelCase, ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
337
def __lowercase ( _UpperCamelCase = 4000000 ) ->int: """simple docstring""" lowercase : int = [] lowercase , lowercase : str = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCamelCase ) lowercase , lowercase : Dict = b, a + b return sum(_UpperCamelCase ) if __name__ == "__main__": print(F'''{solution() = }''')
337
1
from ....configuration_utils import PretrainedConfig from ....utils import logging __a = logging.get_logger(__name__) # TODO: upload to AWS __a = { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json''' ), } class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[Any] = 'retribert' def __init__( self , SCREAMING_SNAKE_CASE__=30522 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=128 , SCREAMING_SNAKE_CASE__=0 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = vocab_size lowercase : Any = hidden_size lowercase : Optional[Any] = num_hidden_layers lowercase : List[str] = num_attention_heads lowercase : str = hidden_act lowercase : Dict = intermediate_size lowercase : Union[str, Any] = hidden_dropout_prob lowercase : str = attention_probs_dropout_prob lowercase : List[str] = max_position_embeddings lowercase : int = type_vocab_size lowercase : Union[str, Any] = initializer_range lowercase : str = layer_norm_eps lowercase : List[Any] = share_encoders lowercase : str = projection_dim
337
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'perceiver' def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Any = num_latents lowercase : Union[str, Any] = d_latents lowercase : str = d_model lowercase : int = num_blocks lowercase : str = num_self_attends_per_block lowercase : List[str] = num_self_attention_heads lowercase : List[str] = num_cross_attention_heads lowercase : int = qk_channels lowercase : List[Any] = v_channels lowercase : int = cross_attention_shape_for_attention lowercase : Tuple = self_attention_widening_factor lowercase : Dict = cross_attention_widening_factor lowercase : Any = hidden_act lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : Union[str, Any] = initializer_range lowercase : Any = layer_norm_eps lowercase : Any = use_query_residual # masked language modeling attributes lowercase : List[str] = vocab_size lowercase : Dict = max_position_embeddings # image classification attributes lowercase : int = image_size # flow attributes lowercase : List[Any] = train_size # multimodal autoencoding attributes lowercase : List[Any] = num_frames lowercase : Union[str, Any] = audio_samples_per_frame lowercase : int = samples_per_patch lowercase : Optional[int] = output_shape class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): if self.task == "multiple-choice": lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def __lowerCamelCase ( self ): return 1E-4 def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : str = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ ) # Generate dummy inputs according to compute batch and sequence lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''input_ids''' ) return inputs elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
337
1
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->list[int]: """simple docstring""" return [ord(_UpperCamelCase ) - 96 for elem in plain] def __lowercase ( _UpperCamelCase ) ->str: """simple docstring""" return "".join(chr(elem + 96 ) for elem in encoded ) def __lowercase ( ) ->None: """simple docstring""" lowercase : Optional[Any] = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', _UpperCamelCase ) print('''Decoded:''', decode(_UpperCamelCase ) ) if __name__ == "__main__": main()
337
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def __lowercase ( _UpperCamelCase = 8 ) ->str: """simple docstring""" lowercase : List[str] = ascii_letters + digits + punctuation return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" i -= len(_UpperCamelCase ) lowercase : Dict = i // 3 lowercase : List[str] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowercase : Union[str, Any] = ( chars_incl + random(_UpperCamelCase, quotient + remainder ) + random(_UpperCamelCase, _UpperCamelCase ) + random(_UpperCamelCase, _UpperCamelCase ) ) lowercase : Union[str, Any] = list(_UpperCamelCase ) shuffle(_UpperCamelCase ) return "".join(_UpperCamelCase ) # random is a generalised function for letters, characters and numbers def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool: """simple docstring""" if len(_UpperCamelCase ) < min_length: # Your Password must be at least 8 characters long return False lowercase : str = any(char in ascii_uppercase for char in password ) lowercase : List[str] = any(char in ascii_lowercase for char in password ) lowercase : Dict = any(char in digits for char in password ) lowercase : Tuple = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def __lowercase ( ) ->Dict: """simple docstring""" lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() ) lowercase : Optional[Any] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''', password_generator(_UpperCamelCase ) ) print( '''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
337
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __a = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from __future__ import annotations __a = [] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" for i in range(len(_UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" if row >= len(_UpperCamelCase ): solution.append(_UpperCamelCase ) printboard(_UpperCamelCase ) print() return True for i in range(len(_UpperCamelCase ) ): if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : int = 1 solve(_UpperCamelCase, row + 1 ) lowercase : Tuple = 0 return False def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" for i in range(len(_UpperCamelCase ) ): for j in range(len(_UpperCamelCase ) ): if board[i][j] == 1: print('''Q''', end=''' ''' ) else: print('''.''', end=''' ''' ) print() # n=int(input("The no. of queens")) __a = 8 __a = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
337
1
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel __a = HfApi() __a = {} # fmt: off __a = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) __a = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) __a = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) __a = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) __a = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) __a = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) __a = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) __a = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) __a = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) __a = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) __a = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) __a = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) __a = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) __a = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) __a = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on __a = api.list_models(filter='''diffusers''') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": __a = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith('''CompVis'''): __a = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''') else: __a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) __a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) __a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): __a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1e-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
337
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
1
from math import cos, sin, sqrt, tau from audio_filters.iir_filter import IIRFilter def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ) ) ->IIRFilter: """simple docstring""" lowercase : Optional[Any] = tau * frequency / samplerate lowercase : Dict = sin(_UpperCamelCase ) lowercase : Tuple = cos(_UpperCamelCase ) lowercase : str = _sin / (2 * q_factor) lowercase : List[Any] = (1 - _cos) / 2 lowercase : int = 1 - _cos lowercase : Dict = 1 + alpha lowercase : Any = -2 * _cos lowercase : Union[str, Any] = 1 - alpha lowercase : List[Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa], [ba, ba, ba] ) return filt def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ) ) ->IIRFilter: """simple docstring""" lowercase : str = tau * frequency / samplerate lowercase : List[Any] = sin(_UpperCamelCase ) lowercase : int = cos(_UpperCamelCase ) lowercase : List[Any] = _sin / (2 * q_factor) lowercase : List[Any] = (1 + _cos) / 2 lowercase : Optional[Any] = -1 - _cos lowercase : Tuple = 1 + alpha lowercase : Tuple = -2 * _cos lowercase : Optional[int] = 1 - alpha lowercase : Dict = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa], [ba, ba, ba] ) return filt def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ) ) ->IIRFilter: """simple docstring""" lowercase : Dict = tau * frequency / samplerate lowercase : str = sin(_UpperCamelCase ) lowercase : Optional[int] = cos(_UpperCamelCase ) lowercase : Optional[int] = _sin / (2 * q_factor) lowercase : Optional[int] = _sin / 2 lowercase : Tuple = 0 lowercase : List[str] = -ba lowercase : int = 1 + alpha lowercase : Any = -2 * _cos lowercase : Any = 1 - alpha lowercase : List[str] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa], [ba, ba, ba] ) return filt def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ) ) ->IIRFilter: """simple docstring""" lowercase : str = tau * frequency / samplerate lowercase : Union[str, Any] = sin(_UpperCamelCase ) lowercase : Any = cos(_UpperCamelCase ) lowercase : Optional[Any] = _sin / (2 * q_factor) lowercase : List[str] = 1 - alpha lowercase : List[Any] = -2 * _cos lowercase : Union[str, Any] = 1 + alpha lowercase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([ba, ba, ba], [ba, ba, ba] ) return filt def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ), ) ->IIRFilter: """simple docstring""" lowercase : Optional[int] = tau * frequency / samplerate lowercase : Tuple = sin(_UpperCamelCase ) lowercase : Union[str, Any] = cos(_UpperCamelCase ) lowercase : List[Any] = _sin / (2 * q_factor) lowercase : Optional[Any] = 10 ** (gain_db / 40) lowercase : List[str] = 1 + alpha * big_a lowercase : str = -2 * _cos lowercase : Tuple = 1 - alpha * big_a lowercase : int = 1 + alpha / big_a lowercase : Optional[Any] = -2 * _cos lowercase : Optional[Any] = 1 - alpha / big_a lowercase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa], [ba, ba, ba] ) return filt def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ), ) ->IIRFilter: """simple docstring""" lowercase : Tuple = tau * frequency / samplerate lowercase : List[Any] = sin(_UpperCamelCase ) lowercase : List[str] = cos(_UpperCamelCase ) lowercase : Union[str, Any] = _sin / (2 * q_factor) lowercase : Dict = 10 ** (gain_db / 40) lowercase : Optional[int] = (big_a + 1) - (big_a - 1) * _cos lowercase : str = (big_a + 1) + (big_a - 1) * _cos lowercase : Union[str, Any] = (big_a - 1) - (big_a + 1) * _cos lowercase : int = (big_a - 1) + (big_a + 1) * _cos lowercase : str = 2 * sqrt(_UpperCamelCase ) * alpha lowercase : Dict = big_a * (pmc + aaa) lowercase : List[str] = 2 * big_a * mpc lowercase : Optional[int] = big_a * (pmc - aaa) lowercase : Tuple = ppmc + aaa lowercase : Optional[int] = -2 * pmpc lowercase : Union[str, Any] = ppmc - aaa lowercase : Any = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa], [ba, ba, ba] ) return filt def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 1 / sqrt(2 ), ) ->IIRFilter: """simple docstring""" lowercase : Dict = tau * frequency / samplerate lowercase : int = sin(_UpperCamelCase ) lowercase : Optional[Any] = cos(_UpperCamelCase ) lowercase : Optional[int] = _sin / (2 * q_factor) lowercase : List[str] = 10 ** (gain_db / 40) lowercase : Any = (big_a + 1) - (big_a - 1) * _cos lowercase : Dict = (big_a + 1) + (big_a - 1) * _cos lowercase : List[str] = (big_a - 1) - (big_a + 1) * _cos lowercase : List[str] = (big_a - 1) + (big_a + 1) * _cos lowercase : List[str] = 2 * sqrt(_UpperCamelCase ) * alpha lowercase : Dict = big_a * (ppmc + aaa) lowercase : Dict = -2 * big_a * pmpc lowercase : Optional[int] = big_a * (ppmc - aaa) lowercase : Dict = pmc + aaa lowercase : List[Any] = 2 * mpc lowercase : Union[str, Any] = pmc - aaa lowercase : Union[str, Any] = IIRFilter(2 ) filt.set_coefficients([aa, aa, aa], [ba, ba, ba] ) return filt
337
from collections.abc import Callable class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ = None ): # Stores actual heap items. lowercase : list = [] # Stores indexes of each item for supporting updates and deletion. lowercase : dict = {} # Stores current size of heap. lowercase : str = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return int((i - 1) / 2 ) if i > 0 else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : Dict = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. lowercase , lowercase : int = self.arr[j], self.arr[i] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.arr[i][1] < self.arr[j][1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = self._left(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = i if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = left if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = right return valid_parent def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ ) while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) while valid_parent != index: self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : str = self.pos_map[item] lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : List[str] = self.pos_map[item] del self.pos_map[item] lowercase : Optional[int] = self.arr[self.size - 1] lowercase : int = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] ) else: lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )] lowercase : str = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowerCamelCase ( self ): return self.arr[0] if self.size else None def __lowerCamelCase ( self ): lowercase : str = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __lowercase ( ) ->None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
337
1
from __future__ import annotations __a = 8.988e9 # units = N * m^s * C^-2 def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->dict[str, float]: """simple docstring""" lowercase : Tuple = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: lowercase : List[Any] = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: lowercase : List[str] = abs(_UpperCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: lowercase : str = abs(_UpperCamelCase ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: lowercase : Union[str, Any] = (COULOMBS_CONSTANT * charge_product / abs(_UpperCamelCase )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
337
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
337
1
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[str, Any] = ['image_processor', 'tokenizer'] A : str = 'AutoImageProcessor' A : Dict = 'AutoTokenizer' def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : int = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ): if text is None and images is None: raise ValueError('''You have to specify either text or images. Both cannot be none.''' ) if text is not None: lowercase : Optional[int] = self.tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if images is not None: lowercase : Optional[Any] = self.image_processor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) if text is not None and images is not None: lowercase : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE__ ) , tensor_type=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return ["input_ids", "attention_mask", "pixel_values"]
337
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __a = logging.get_logger(__name__) __a = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __a = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } __a = {'''facebook/blenderbot-3B''': 1_28} class __SCREAMING_SNAKE_CASE ( A__ ): A : Dict = VOCAB_FILES_NAMES A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[int] = ['input_ids', 'attention_mask'] A : str = BlenderbotTokenizer def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ): super().__init__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) ) lowercase : str = add_prefix_space lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = add_prefix_space lowercase : str = '''post_processor''' lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if tokenizer_component_instance: lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Tuple = tuple(state['''sep'''] ) if "cls" in state: lowercase : Union[str, Any] = tuple(state['''cls'''] ) lowercase : Optional[int] = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : Any = add_prefix_space lowercase : Tuple = True if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets: lowercase : List[str] = trim_offsets lowercase : Optional[int] = True if changes_to_apply: lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) ) lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __lowerCamelCase ( self ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value lowercase : Any = value def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Tuple = [self.sep_token_id] lowercase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): return token_ids_a + [self.eos_token_id] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ ) lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length: lowercase : Tuple = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
337
1
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class __SCREAMING_SNAKE_CASE ( pl.LightningModule ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Any = model lowercase : Optional[Any] = 2 lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __lowerCamelCase ( self ): pass def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase ) lowercase : int = LightningModel(_UpperCamelCase ) lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
337
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowercase ( ) ->int: """simple docstring""" lowercase : Tuple = HfArgumentParser(_UpperCamelCase ) lowercase : List[str] = parser.parse_args_into_dataclasses()[0] lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase ) try: lowercase : Any = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] ) lowercase : Any = '''''' lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] ) lowercase : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase ) raise ValueError(_UpperCamelCase ) benchmark.run() if __name__ == "__main__": main()
337
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): A : Dict = 'vision-encoder-decoder' A : Optional[int] = True def __init__( self , **SCREAMING_SNAKE_CASE__ ): super().__init__(**SCREAMING_SNAKE_CASE__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"""A configuraton of type {self.model_type} cannot be instantiated because """ f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) lowercase : Optional[int] = kwargs.pop('''encoder''' ) lowercase : str = encoder_config.pop('''model_type''' ) lowercase : Optional[Any] = kwargs.pop('''decoder''' ) lowercase : Any = decoder_config.pop('''model_type''' ) lowercase : Optional[int] = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) lowercase : int = AutoConfig.for_model(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) lowercase : int = True @classmethod def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowercase : Optional[Any] = True lowercase : Optional[Any] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ ) lowercase : List[Any] = self.encoder.to_dict() lowercase : Dict = self.decoder.to_dict() lowercase : List[Any] = self.__class__.model_type return output class __SCREAMING_SNAKE_CASE ( A__ ): A : str = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): lowercase : Dict = OrderedDict() lowercase : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : Optional[int] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowercase : Optional[Any] = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , ): import torch lowercase : List[Any] = OrderedDict() lowercase : Optional[Any] = super().generate_dummy_inputs( SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , seq_length=SCREAMING_SNAKE_CASE__ , is_pair=SCREAMING_SNAKE_CASE__ , framework=SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = dummy_input['''input_ids'''].shape lowercase : Dict = (batch, encoder_sequence, self._config.encoder_hidden_size) lowercase : Dict = dummy_input.pop('''input_ids''' ) lowercase : Union[str, Any] = dummy_input.pop('''attention_mask''' ) lowercase : Tuple = torch.zeros(SCREAMING_SNAKE_CASE__ ) return common_inputs class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "default" ): lowercase : int = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
337
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [False] * len(_UpperCamelCase ) lowercase : Optional[int] = [] queue.append(_UpperCamelCase ) lowercase : Union[str, Any] = True while queue: lowercase : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) lowercase : Tuple = True lowercase : Optional[Any] = u return visited[t] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : List[str] = [-1] * (len(_UpperCamelCase )) lowercase : int = 0 while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : List[str] = float('''Inf''' ) lowercase : int = sink while s != source: # Find the minimum value in select path lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] ) lowercase : Union[str, Any] = parent[s] max_flow += path_flow lowercase : Optional[int] = sink while v != source: lowercase : Any = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Union[str, Any] = parent[v] return max_flow __a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __a , __a = 0, 5 print(ford_fulkerson(graph, source, sink))
337
1
def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" if not isinstance(_UpperCamelCase, _UpperCamelCase ): lowercase : List[str] = f"""Input value of [number={number}] must be an integer""" raise TypeError(_UpperCamelCase ) if number < 1: lowercase : str = f"""Input value of [number={number}] must be > 0""" raise ValueError(_UpperCamelCase ) lowercase : str = 1 for i in range(1, _UpperCamelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
337
from typing import List from .keymap import KEYMAP, get_character def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += [key] setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator def __lowercase ( *_UpperCamelCase ) ->Any: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += keys setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator class __SCREAMING_SNAKE_CASE ( A__ ): def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ): setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} ) setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] ) for key in handled_keys: lowercase : List[Any] = value return new_cls @staticmethod def __lowerCamelCase ( cls ): lowercase : Dict = get_character() if char != KEYMAP["undefined"]: lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ ) if handler: lowercase : Tuple = char return handler(cls ) else: return None def __lowercase ( cls ) ->Any: """simple docstring""" return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
337
1
import math def __lowercase ( _UpperCamelCase = 100 ) ->int: """simple docstring""" lowercase : Tuple = sum(i * i for i in range(1, n + 1 ) ) lowercase : List[Any] = int(math.pow(sum(range(1, n + 1 ) ), 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F'''{solution() = }''')
337
import logging import os from .state import PartialState class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ): if self._should_log(SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) elif in_order: lowercase : List[Any] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) state.wait_for_everyone() def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]: """simple docstring""" if log_level is None: lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase ) lowercase : str = logging.getLogger(_UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_UpperCamelCase, {} )
337
1
import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" random.seed(_UpperCamelCase ) np.random.seed(_UpperCamelCase ) torch.manual_seed(_UpperCamelCase ) torch.cuda.manual_seed_all(_UpperCamelCase ) # ^^ safe to call this function even if cuda is not available class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 0.9999 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = 0 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = 1.0 , SCREAMING_SNAKE_CASE__ = 2 / 3 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): if isinstance(SCREAMING_SNAKE_CASE__ , torch.nn.Module ): lowercase : List[str] = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , SCREAMING_SNAKE_CASE__ , standard_warn=SCREAMING_SNAKE_CASE__ , ) lowercase : str = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowercase : Optional[int] = True if kwargs.get('''max_value''' , SCREAMING_SNAKE_CASE__ ) is not None: lowercase : Any = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' , '''1.0.0''' , SCREAMING_SNAKE_CASE__ , standard_warn=SCREAMING_SNAKE_CASE__ ) lowercase : str = kwargs['''max_value'''] if kwargs.get('''min_value''' , SCREAMING_SNAKE_CASE__ ) is not None: lowercase : Any = '''The `min_value` argument is deprecated. Please use `min_decay` instead.''' deprecate('''min_value''' , '''1.0.0''' , SCREAMING_SNAKE_CASE__ , standard_warn=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = kwargs['''min_value'''] lowercase : Dict = list(SCREAMING_SNAKE_CASE__ ) lowercase : str = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , SCREAMING_SNAKE_CASE__ ) is not None: lowercase : int = '''The `device` argument is deprecated. Please use `to` instead.''' deprecate('''device''' , '''1.0.0''' , SCREAMING_SNAKE_CASE__ , standard_warn=SCREAMING_SNAKE_CASE__ ) self.to(device=kwargs['''device'''] ) lowercase : List[str] = None lowercase : List[Any] = decay lowercase : Union[str, Any] = min_decay lowercase : Union[str, Any] = update_after_step lowercase : Dict = use_ema_warmup lowercase : Optional[Any] = inv_gamma lowercase : Tuple = power lowercase : Dict = 0 lowercase : Dict = None # set in `step()` lowercase : Dict = model_cls lowercase : Tuple = model_config @classmethod def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = model_cls.load_config(SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = model_cls.from_pretrained(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = cls(model.parameters() , model_cls=SCREAMING_SNAKE_CASE__ , model_config=model.config ) ema_model.load_state_dict(SCREAMING_SNAKE_CASE__ ) return ema_model def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowercase : Optional[int] = self.model_cls.from_config(self.model_config ) lowercase : List[Any] = self.state_dict() state_dict.pop('''shadow_params''' , SCREAMING_SNAKE_CASE__ ) model.register_to_config(**SCREAMING_SNAKE_CASE__ ) self.copy_to(model.parameters() ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowercase : Union[str, Any] = 1 - (1 + step / self.inv_gamma) ** -self.power else: lowercase : List[str] = (1 + step) / (10 + step) lowercase : List[Any] = min(SCREAMING_SNAKE_CASE__ , self.decay ) # make sure decay is not smaller than min_decay lowercase : Tuple = max(SCREAMING_SNAKE_CASE__ , self.min_decay ) return cur_decay_value @torch.no_grad() def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if isinstance(SCREAMING_SNAKE_CASE__ , torch.nn.Module ): lowercase : List[str] = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , SCREAMING_SNAKE_CASE__ , standard_warn=SCREAMING_SNAKE_CASE__ , ) lowercase : str = parameters.parameters() lowercase : Optional[int] = list(SCREAMING_SNAKE_CASE__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. lowercase : Any = self.get_decay(self.optimization_step ) lowercase : str = decay lowercase : List[str] = 1 - decay lowercase : Union[str, Any] = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , SCREAMING_SNAKE_CASE__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowercase : str = deepspeed.zero.GatheredParameters(SCREAMING_SNAKE_CASE__ , modifier_rank=SCREAMING_SNAKE_CASE__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = list(SCREAMING_SNAKE_CASE__ ) for s_param, param in zip(self.shadow_params , SCREAMING_SNAKE_CASE__ ): param.data.copy_(s_param.to(param.device ).data ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): lowercase : Union[str, Any] = [ p.to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ) if p.is_floating_point() else p.to(device=SCREAMING_SNAKE_CASE__ ) for p in self.shadow_params ] def __lowerCamelCase ( self ): return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = [param.detach().cpu().clone() for param in parameters] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params , SCREAMING_SNAKE_CASE__ ): param.data.copy_(c_param.data ) # Better memory-wise. lowercase : Optional[Any] = None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = copy.deepcopy(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowercase : Tuple = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , SCREAMING_SNAKE_CASE__ ): raise ValueError('''Invalid min_decay''' ) lowercase : List[Any] = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , SCREAMING_SNAKE_CASE__ ): raise ValueError('''Invalid optimization_step''' ) lowercase : Optional[Any] = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , SCREAMING_SNAKE_CASE__ ): raise ValueError('''Invalid update_after_step''' ) lowercase : Optional[Any] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , SCREAMING_SNAKE_CASE__ ): raise ValueError('''Invalid use_ema_warmup''' ) lowercase : int = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowercase : Optional[int] = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) lowercase : List[str] = state_dict.get('''shadow_params''' , SCREAMING_SNAKE_CASE__ ) if shadow_params is not None: lowercase : List[Any] = shadow_params if not isinstance(self.shadow_params , SCREAMING_SNAKE_CASE__ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
337
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class __SCREAMING_SNAKE_CASE ( pl.LightningModule ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Any = model lowercase : Optional[Any] = 2 lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __lowerCamelCase ( self ): pass def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase ) lowercase : int = LightningModel(_UpperCamelCase ) lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
337
1
import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib __a = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } __a = logging.WARNING def __lowercase ( ) ->Union[str, Any]: """simple docstring""" lowercase : Optional[int] = os.getenv('''DATASETS_VERBOSITY''', _UpperCamelCase ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def __lowercase ( ) ->str: """simple docstring""" return __name__.split('''.''' )[0] def __lowercase ( ) ->logging.Logger: """simple docstring""" return logging.getLogger(_get_library_name() ) def __lowercase ( ) ->None: """simple docstring""" lowercase : List[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def __lowercase ( ) ->None: """simple docstring""" lowercase : List[Any] = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def __lowercase ( _UpperCamelCase = None ) ->logging.Logger: """simple docstring""" if name is None: lowercase : Optional[int] = _get_library_name() return logging.getLogger(_UpperCamelCase ) def __lowercase ( ) ->int: """simple docstring""" return _get_library_root_logger().getEffectiveLevel() def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" _get_library_root_logger().setLevel(_UpperCamelCase ) def __lowercase ( ) ->Dict: """simple docstring""" return set_verbosity(_UpperCamelCase ) def __lowercase ( ) ->Any: """simple docstring""" return set_verbosity(_UpperCamelCase ) def __lowercase ( ) ->int: """simple docstring""" return set_verbosity(_UpperCamelCase ) def __lowercase ( ) ->Optional[int]: """simple docstring""" return set_verbosity(_UpperCamelCase ) def __lowercase ( ) ->None: """simple docstring""" lowercase : Union[str, Any] = False def __lowercase ( ) ->None: """simple docstring""" lowercase : Optional[int] = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class __SCREAMING_SNAKE_CASE : def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument lowercase : List[str] = args[0] if args else None def __iter__( self ): return iter(self._iterator ) def __getattr__( self , SCREAMING_SNAKE_CASE__ ): def empty_fn(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): return self def __exit__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return __a = True class __SCREAMING_SNAKE_CASE : def __call__( self , *SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ ): if _tqdm_active and not disable: return tqdm_lib.tqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) else: return EmptyTqdm(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): if _tqdm_active: return tqdm_lib.tqdm.get_lock() __a = _tqdm_cls() def __lowercase ( ) ->bool: """simple docstring""" global _tqdm_active return bool(_tqdm_active ) def __lowercase ( ) ->Optional[int]: """simple docstring""" global _tqdm_active lowercase : List[str] = True def __lowercase ( ) ->Optional[Any]: """simple docstring""" global _tqdm_active lowercase : Optional[Any] = False
337
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __SCREAMING_SNAKE_CASE ( A__ ): A : Any = 'yolos' def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = hidden_size lowercase : int = num_hidden_layers lowercase : str = num_attention_heads lowercase : str = intermediate_size lowercase : Dict = hidden_act lowercase : int = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[Any] = initializer_range lowercase : Optional[int] = layer_norm_eps lowercase : str = image_size lowercase : Dict = patch_size lowercase : str = num_channels lowercase : Optional[int] = qkv_bias lowercase : List[str] = num_detection_tokens lowercase : List[str] = use_mid_position_embeddings lowercase : Dict = auxiliary_loss # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : Any = bbox_cost lowercase : int = giou_cost # Loss coefficients lowercase : Dict = bbox_loss_coefficient lowercase : Optional[Any] = giou_loss_coefficient lowercase : Tuple = eos_coefficient class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return 12
337
1
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = '''▁''' __a = {'''vocab_file''': '''sentencepiece.bpe.model'''} __a = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } __a = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[int] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Any = ['input_ids', 'attention_mask'] def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): # Mask token behave like a normal word, i.e. include the space before it lowercase : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token lowercase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) ) lowercase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowercase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase : str = 1 lowercase : Any = len(self.sp_model ) + self.fairseq_offset lowercase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ): lowercase : Any = self.__dict__.copy() lowercase : Any = None lowercase : List[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : Optional[Any] = {} lowercase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase : int = [self.cls_token_id] lowercase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Dict = [self.sep_token_id] lowercase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCamelCase ( self ): return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def __lowerCamelCase ( self ): lowercase : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase : Tuple = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = ''''''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ''' ''' ).strip() return out_string def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi: lowercase : int = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,)
337
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __a = { '''<''': operator.lt, '''<=''': operator.le, '''==''': operator.eq, '''!=''': operator.ne, '''>=''': operator.ge, '''>''': operator.gt, } def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider""" f""" reinstalling {pkg}.""" ) if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ): raise ImportError( f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None: """simple docstring""" lowercase : List[Any] = f"""\n{hint}""" if hint is not None else '''''' # non-versioned check if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ): lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None else: lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f""" got {requirement}""" ) lowercase , lowercase : str = match[0] lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements lowercase : List[Any] = {} for w in want_range: lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f""" but got {requirement}""" ) lowercase , lowercase : Optional[int] = match[0] lowercase : Dict = want_ver if op not in ops: raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" ) # special case if pkg == "python": lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return # check if any version is installed try: lowercase : List[str] = importlib.metadata.version(_UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(_UpperCamelCase, _UpperCamelCase )
337
1
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Tuple: """simple docstring""" def get_masked_lm_array(_UpperCamelCase ): lowercase : Dict = f"""masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase : List[Any] = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase ) if "kernel" in name: lowercase : Tuple = array.transpose() return torch.from_numpy(_UpperCamelCase ) def get_encoder_array(_UpperCamelCase ): lowercase : Any = f"""encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase : Optional[Any] = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase ) if "kernel" in name: lowercase : Dict = array.transpose() return torch.from_numpy(_UpperCamelCase ) def get_encoder_layer_array(_UpperCamelCase, _UpperCamelCase ): lowercase : Optional[Any] = f"""encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase : List[Any] = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase ) if "kernel" in name: lowercase : List[str] = array.transpose() return torch.from_numpy(_UpperCamelCase ) def get_encoder_attention_layer_array(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : Any = f"""encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE""" lowercase : Tuple = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase ) lowercase : List[str] = array.reshape(_UpperCamelCase ) if "kernel" in name: lowercase : List[Any] = array.transpose() return torch.from_numpy(_UpperCamelCase ) print(f"""Loading model based on config from {config_path}...""" ) lowercase : Union[str, Any] = BertConfig.from_json_file(_UpperCamelCase ) lowercase : int = BertForMaskedLM(_UpperCamelCase ) # Layers for layer_index in range(0, config.num_hidden_layers ): lowercase : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase : BertSelfAttention = layer.attention.self lowercase : Optional[int] = get_encoder_attention_layer_array( _UpperCamelCase, '''_query_dense/kernel''', self_attn.query.weight.data.shape ) lowercase : Optional[Any] = get_encoder_attention_layer_array( _UpperCamelCase, '''_query_dense/bias''', self_attn.query.bias.data.shape ) lowercase : Any = get_encoder_attention_layer_array( _UpperCamelCase, '''_key_dense/kernel''', self_attn.key.weight.data.shape ) lowercase : Any = get_encoder_attention_layer_array( _UpperCamelCase, '''_key_dense/bias''', self_attn.key.bias.data.shape ) lowercase : Any = get_encoder_attention_layer_array( _UpperCamelCase, '''_value_dense/kernel''', self_attn.value.weight.data.shape ) lowercase : Tuple = get_encoder_attention_layer_array( _UpperCamelCase, '''_value_dense/bias''', self_attn.value.bias.data.shape ) # Self-attention Output lowercase : BertSelfOutput = layer.attention.output lowercase : Optional[int] = get_encoder_attention_layer_array( _UpperCamelCase, '''_output_dense/kernel''', self_output.dense.weight.data.shape ) lowercase : str = get_encoder_attention_layer_array( _UpperCamelCase, '''_output_dense/bias''', self_output.dense.bias.data.shape ) lowercase : Tuple = get_encoder_layer_array(_UpperCamelCase, '''_attention_layer_norm/gamma''' ) lowercase : str = get_encoder_layer_array(_UpperCamelCase, '''_attention_layer_norm/beta''' ) # Intermediate lowercase : BertIntermediate = layer.intermediate lowercase : List[Any] = get_encoder_layer_array(_UpperCamelCase, '''_intermediate_dense/kernel''' ) lowercase : List[str] = get_encoder_layer_array(_UpperCamelCase, '''_intermediate_dense/bias''' ) # Output lowercase : BertOutput = layer.output lowercase : Dict = get_encoder_layer_array(_UpperCamelCase, '''_output_dense/kernel''' ) lowercase : Optional[Any] = get_encoder_layer_array(_UpperCamelCase, '''_output_dense/bias''' ) lowercase : Tuple = get_encoder_layer_array(_UpperCamelCase, '''_output_layer_norm/gamma''' ) lowercase : List[str] = get_encoder_layer_array(_UpperCamelCase, '''_output_layer_norm/beta''' ) # Embeddings lowercase : Optional[int] = get_encoder_array('''_position_embedding_layer/embeddings''' ) lowercase : Union[str, Any] = get_encoder_array('''_type_embedding_layer/embeddings''' ) lowercase : Any = get_encoder_array('''_embedding_norm_layer/gamma''' ) lowercase : Tuple = get_encoder_array('''_embedding_norm_layer/beta''' ) # LM Head lowercase : Tuple = model.cls.predictions.transform lowercase : List[str] = get_masked_lm_array('''dense/kernel''' ) lowercase : Any = get_masked_lm_array('''dense/bias''' ) lowercase : Any = get_masked_lm_array('''layer_norm/gamma''' ) lowercase : Union[str, Any] = get_masked_lm_array('''layer_norm/beta''' ) lowercase : List[Any] = get_masked_lm_array('''embedding_table''' ) # Pooling lowercase : int = BertPooler(config=_UpperCamelCase ) lowercase : BertPooler = get_encoder_array('''_pooler_layer/kernel''' ) lowercase : BertPooler = get_encoder_array('''_pooler_layer/bias''' ) # Export final model model.save_pretrained(_UpperCamelCase ) # Integration test - should load without any errors ;) lowercase : Tuple = BertForMaskedLM.from_pretrained(_UpperCamelCase ) print(new_model.eval() ) print('''Model conversion was done sucessfully!''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow Token Dropping checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', type=str, required=True, help='''The config json file corresponding to the BERT model. This specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', type=str, required=True, help='''Path to the output PyTorch model.''', ) __a = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
337
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a = logging.get_logger(__name__) __a = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'deta' A : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = backbone_config.pop('''model_type''' ) lowercase : Any = CONFIG_MAPPING[backbone_model_type] lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = backbone_config lowercase : Union[str, Any] = num_queries lowercase : Any = max_position_embeddings lowercase : int = d_model lowercase : Any = encoder_ffn_dim lowercase : Optional[int] = encoder_layers lowercase : Tuple = encoder_attention_heads lowercase : Optional[Any] = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : int = decoder_attention_heads lowercase : Any = dropout lowercase : int = attention_dropout lowercase : Dict = activation_dropout lowercase : int = activation_function lowercase : Dict = init_std lowercase : List[str] = init_xavier_std lowercase : Optional[Any] = encoder_layerdrop lowercase : Tuple = auxiliary_loss lowercase : Tuple = position_embedding_type # deformable attributes lowercase : List[str] = num_feature_levels lowercase : Tuple = encoder_n_points lowercase : Optional[int] = decoder_n_points lowercase : Tuple = two_stage lowercase : Optional[Any] = two_stage_num_proposals lowercase : Union[str, Any] = with_box_refine lowercase : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : str = bbox_cost lowercase : List[Any] = giou_cost # Loss coefficients lowercase : Tuple = mask_loss_coefficient lowercase : Any = dice_loss_coefficient lowercase : Dict = bbox_loss_coefficient lowercase : Tuple = giou_loss_coefficient lowercase : Union[str, Any] = eos_coefficient lowercase : Tuple = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return self.encoder_attention_heads @property def __lowerCamelCase ( self ): return self.d_model def __lowerCamelCase ( self ): lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Any = self.backbone_config.to_dict() lowercase : List[str] = self.__class__.model_type return output
337
1
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class __SCREAMING_SNAKE_CASE ( A__ ): A : Dict = (DDIMParallelScheduler,) A : Union[str, Any] = (('eta', 0.0), ('num_inference_steps', 50)) def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ): lowercase : Any = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**SCREAMING_SNAKE_CASE__ ) return config def __lowerCamelCase ( self , **SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = self.scheduler_classes[0] lowercase : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = 10, 0.0 lowercase : Dict = self.dummy_model() lowercase : int = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for t in scheduler.timesteps: lowercase : str = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample return sample def __lowerCamelCase ( self ): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = self.scheduler_classes[0] lowercase : Dict = self.get_scheduler_config(steps_offset=1 ) lowercase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__ ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __lowerCamelCase ( self ): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ , beta_end=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , ) def __lowerCamelCase ( self ): for t in [1, 10, 49]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , num_inference_steps=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ , eta=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Optional[Any] = self.scheduler_classes[0] lowercase : str = self.get_scheduler_config() lowercase : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5 def __lowerCamelCase ( self ): lowercase : Any = self.scheduler_classes[0] lowercase : Union[str, Any] = self.get_scheduler_config() lowercase : str = scheduler_class(**SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Dict = 10, 0.0 scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = self.dummy_model() lowercase : Union[str, Any] = self.dummy_sample_deter lowercase : Tuple = self.dummy_sample_deter + 0.1 lowercase : Tuple = self.dummy_sample_deter - 0.1 lowercase : Dict = samplea.shape[0] lowercase : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 ) lowercase : str = torch.arange(SCREAMING_SNAKE_CASE__ )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) lowercase : Any = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 1147.7904 ) < 1E-2 assert abs(result_mean.item() - 0.4982 ) < 1E-3 def __lowerCamelCase ( self ): lowercase : int = self.full_loop() lowercase : int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) lowercase : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 172.0067 ) < 1E-2 assert abs(result_mean.item() - 0.223967 ) < 1E-3 def __lowerCamelCase ( self ): lowercase : List[str] = self.full_loop(prediction_type='''v_prediction''' ) lowercase : int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) lowercase : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 52.5302 ) < 1E-2 assert abs(result_mean.item() - 0.0684 ) < 1E-3 def __lowerCamelCase ( self ): # We specify different beta, so that the first alpha is 0.99 lowercase : Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) lowercase : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) lowercase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.8295 ) < 1E-2 assert abs(result_mean.item() - 0.1951 ) < 1E-3 def __lowerCamelCase ( self ): # We specify different beta, so that the first alpha is 0.99 lowercase : Optional[Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ , beta_start=0.01 ) lowercase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__ ) ) lowercase : List[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) ) assert abs(result_sum.item() - 149.0784 ) < 1E-2 assert abs(result_mean.item() - 0.1941 ) < 1E-3
337
def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : Union[str, Any] = 0 for i in range(1, 1001 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
337
1
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(_UpperCamelCase ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __a = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) lowercase : Any = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCamelCase ( self ): lowercase : List[Any] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with a really long name lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
337
1
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : int = multiprocessing.Manager() lowercase : int = manager.list() lowercase : List[str] = multiprocessing.Process(target=_UpperCamelCase, args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append('''timed out''' ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Dict: """simple docstring""" with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowercase : Tuple = shutil.rmtree lowercase : Tuple = os.rmdir lowercase : List[Any] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowercase : Union[str, Any] = {} with swallow_io(): with time_limit(_UpperCamelCase ): exec(_UpperCamelCase, _UpperCamelCase ) result.append('''passed''' ) except TimeoutException: result.append('''timed out''' ) except BaseException as e: result.append(f"""failed: {e}""" ) # Needed for cleaning up. lowercase : Tuple = rmtree lowercase : Union[str, Any] = rmdir lowercase : Optional[Any] = chdir @contextlib.contextmanager def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def signal_handler(_UpperCamelCase, _UpperCamelCase ): raise TimeoutException('''Timed out!''' ) signal.setitimer(signal.ITIMER_REAL, _UpperCamelCase ) signal.signal(signal.SIGALRM, _UpperCamelCase ) try: yield finally: signal.setitimer(signal.ITIMER_REAL, 0 ) @contextlib.contextmanager def __lowercase ( ) ->Dict: """simple docstring""" lowercase : List[str] = WriteOnlyStringIO() with contextlib.redirect_stdout(_UpperCamelCase ): with contextlib.redirect_stderr(_UpperCamelCase ): with redirect_stdin(_UpperCamelCase ): yield @contextlib.contextmanager def __lowercase ( ) ->int: """simple docstring""" with tempfile.TemporaryDirectory() as dirname: with chdir(_UpperCamelCase ): yield dirname class __SCREAMING_SNAKE_CASE ( A__ ): pass class __SCREAMING_SNAKE_CASE ( io.StringIO ): def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): raise OSError def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): raise OSError def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): raise OSError def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): return False class __SCREAMING_SNAKE_CASE ( contextlib._RedirectStream ): # type: ignore A : List[Any] = 'stdin' @contextlib.contextmanager def __lowercase ( _UpperCamelCase ) ->Dict: """simple docstring""" if root == ".": yield return lowercase : Dict = os.getcwd() os.chdir(_UpperCamelCase ) try: yield except BaseException as exc: raise exc finally: os.chdir(_UpperCamelCase ) def __lowercase ( _UpperCamelCase=None ) ->int: """simple docstring""" if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins lowercase : str = None lowercase : List[str] = None import os lowercase : str = '''1''' lowercase : Optional[Any] = None lowercase : Optional[int] = None lowercase : int = None lowercase : Any = None lowercase : Dict = None lowercase : Tuple = None lowercase : List[Any] = None lowercase : Optional[int] = None lowercase : Any = None lowercase : Optional[int] = None lowercase : Tuple = None lowercase : Dict = None lowercase : Any = None lowercase : Optional[int] = None lowercase : List[Any] = None lowercase : str = None lowercase : Optional[Any] = None lowercase : List[str] = None lowercase : List[str] = None lowercase : Dict = None lowercase : Optional[int] = None lowercase : Any = None lowercase : str = None lowercase : Optional[int] = None lowercase : Union[str, Any] = None lowercase : Tuple = None lowercase : Union[str, Any] = None import shutil lowercase : Optional[Any] = None lowercase : Dict = None lowercase : Optional[int] = None import subprocess lowercase : str = None # type: ignore lowercase : int = None import sys lowercase : int = None lowercase : str = None lowercase : Optional[int] = None lowercase : Optional[Any] = None lowercase : Optional[int] = None
337
import math class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1 lowercase : List[Any] = n lowercase : List[Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # adjacency matrix for weight lowercase : Union[str, Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # dp[i][j] stores minimum distance from i to j def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = w def __lowerCamelCase ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
337
1
from string import ascii_uppercase __a = {str(ord(c) - 55): c for c in ascii_uppercase} def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" if isinstance(_UpperCamelCase, _UpperCamelCase ): raise TypeError('''int() can\'t convert non-string with explicit base''' ) if num < 0: raise ValueError('''parameter must be positive int''' ) if isinstance(_UpperCamelCase, _UpperCamelCase ): raise TypeError('''\'str\' object cannot be interpreted as an integer''' ) if isinstance(_UpperCamelCase, _UpperCamelCase ): raise TypeError('''\'float\' object cannot be interpreted as an integer''' ) if base in (0, 1): raise ValueError('''base must be >= 2''' ) if base > 36: raise ValueError('''base must be <= 36''' ) lowercase : Dict = '''''' lowercase : Optional[int] = 0 lowercase : Tuple = 0 while div != 1: lowercase , lowercase : int = divmod(_UpperCamelCase, _UpperCamelCase ) if base >= 11 and 9 < mod < 36: lowercase : List[Any] = ALPHABET_VALUES[str(_UpperCamelCase )] else: lowercase : Dict = str(_UpperCamelCase ) new_value += actual_value lowercase : List[Any] = num // base lowercase : Optional[int] = div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(_UpperCamelCase ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 37): for num in range(10_00): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
337
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(_UpperCamelCase ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = ['pixel_values'] def __init__( self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = 1 / 255 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Dict = size if size is not None else {'''shortest_edge''': 256} lowercase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) lowercase : Any = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowercase : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE__ ) lowercase : int = do_resize lowercase : int = size lowercase : Tuple = resample lowercase : Optional[int] = do_center_crop lowercase : Optional[int] = crop_size lowercase : Union[str, Any] = do_rescale lowercase : str = rescale_factor lowercase : Dict = do_normalize lowercase : Optional[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowercase : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): lowercase : Any = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) lowercase : Optional[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE__ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE__ ) return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): lowercase : Tuple = get_size_dict(SCREAMING_SNAKE_CASE__ ) return center_crop(SCREAMING_SNAKE_CASE__ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ ): return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ): lowercase : str = do_resize if do_resize is not None else self.do_resize lowercase : List[str] = size if size is not None else self.size lowercase : Dict = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = resample if resample is not None else self.resample lowercase : str = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size lowercase : str = get_size_dict(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = do_rescale if do_rescale is not None else self.do_rescale lowercase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize lowercase : Tuple = image_mean if image_mean is not None else self.image_mean lowercase : Tuple = image_std if image_std is not None else self.image_std lowercase : Dict = make_list_of_images(SCREAMING_SNAKE_CASE__ ) if not valid_images(SCREAMING_SNAKE_CASE__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. lowercase : List[str] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images] if do_resize: lowercase : List[str] = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images] if do_center_crop: lowercase : List[str] = [self.center_crop(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ ) for image in images] if do_rescale: lowercase : int = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images] if do_normalize: lowercase : str = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images] lowercase : Tuple = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images] lowercase : str = {'''pixel_values''': images} return BatchFeature(data=SCREAMING_SNAKE_CASE__ , tensor_type=SCREAMING_SNAKE_CASE__ )
337
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
337
1
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging __a = { '''cola''': 2, '''mnli''': 3, '''mrpc''': 2, '''sst-2''': 2, '''sts-b''': 1, '''qqp''': 2, '''qnli''': 2, '''rte''': 2, '''wnli''': 2, } logging.set_verbosity_info() def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None ) ->Union[str, Any]: """simple docstring""" lowercase : Optional[Any] = XLNetConfig.from_json_file(_UpperCamelCase ) lowercase : Union[str, Any] = finetuning_task.lower() if finetuning_task is not None else '''''' if finetuning_task in GLUE_TASKS_NUM_LABELS: print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" ) lowercase : Any = finetuning_task lowercase : Tuple = GLUE_TASKS_NUM_LABELS[finetuning_task] lowercase : str = XLNetForSequenceClassification(_UpperCamelCase ) elif "squad" in finetuning_task: lowercase : Tuple = finetuning_task lowercase : Union[str, Any] = XLNetForQuestionAnswering(_UpperCamelCase ) else: lowercase : Optional[int] = XLNetLMHeadModel(_UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) # Save pytorch-model lowercase : Dict = os.path.join(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = os.path.join(_UpperCamelCase, _UpperCamelCase ) print(f"""Save PyTorch model to {os.path.abspath(_UpperCamelCase )}""" ) torch.save(model.state_dict(), _UpperCamelCase ) print(f"""Save configuration file to {os.path.abspath(_UpperCamelCase )}""" ) with open(_UpperCamelCase, '''w''', encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--xlnet_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained XLNet model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--finetuning_task''', default=None, type=str, help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''', ) __a = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
337
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase ) ->List[int]: """simple docstring""" if isinstance(_UpperCamelCase, np.ndarray ): return list(tensor.shape ) lowercase : Optional[Any] = tf.shape(_UpperCamelCase ) if tensor.shape == tf.TensorShape(_UpperCamelCase ): return dynamic lowercase : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )] def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase : int = [1] * inputs.shape.rank lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis] lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase : List[str] = tf.nn.batch_normalization( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, ) return outputs def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase : Dict = tf.shape(_UpperCamelCase ) lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->tf.Tensor: """simple docstring""" if not isinstance(_UpperCamelCase, tf.Tensor ): lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase : str = ( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None: """simple docstring""" tf.debugging.assert_less( _UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ), ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowercase : Any = np.asarray(_UpperCamelCase ) lowercase : List[Any] = 1 lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCamelCase ): lowercase : Optional[int] = chunk_data else: lowercase : int = data def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" if name in group.attrs: lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]] else: lowercase : Optional[Any] = [] lowercase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" def _expand_single_ad_tensor(_UpperCamelCase ): if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCamelCase, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
337
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=400 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 / 255 , SCREAMING_SNAKE_CASE__=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} lowercase : List[Any] = parent lowercase : Optional[Any] = batch_size lowercase : List[str] = num_channels lowercase : Any = min_resolution lowercase : Any = max_resolution lowercase : Tuple = do_resize lowercase : List[str] = size lowercase : int = do_normalize lowercase : Union[str, Any] = image_mean lowercase : str = image_std lowercase : str = do_rescale lowercase : str = rescale_factor lowercase : Union[str, Any] = do_pad def __lowerCamelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): if not batched: lowercase : Union[str, Any] = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE__ , Image.Image ): lowercase , lowercase : Union[str, Any] = image.size else: lowercase , lowercase : List[Any] = image.shape[1], image.shape[2] if w < h: lowercase : Tuple = int(self.size['''shortest_edge'''] * h / w ) lowercase : Union[str, Any] = self.size['''shortest_edge'''] elif w > h: lowercase : Tuple = self.size['''shortest_edge'''] lowercase : int = int(self.size['''shortest_edge'''] * w / h ) else: lowercase : List[Any] = self.size['''shortest_edge'''] lowercase : int = self.size['''shortest_edge'''] else: lowercase : Optional[Any] = [] for image in image_inputs: lowercase , lowercase : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase : Tuple = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[0] )[0] lowercase : Union[str, Any] = max(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): A : Optional[int] = DeformableDetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): lowercase : Dict = DeformableDetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): lowercase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_rescale''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_pad''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) def __lowerCamelCase ( self ): lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ ) lowercase : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE__ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): # Initialize image_processing lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input lowercase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase , lowercase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase , lowercase : List[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) lowercase : str = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): # Initialize image_processing lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input lowercase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase , lowercase : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase : Tuple = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values lowercase , lowercase : str = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): # Initialize image_processing lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input lowercase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values lowercase , lowercase : Any = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values lowercase , lowercase : Union[str, Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ , batched=SCREAMING_SNAKE_CASE__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): # prepare image and target lowercase : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: lowercase : str = json.loads(f.read() ) lowercase : str = {'''image_id''': 39769, '''annotations''': target} # encode them lowercase : Optional[int] = DeformableDetrImageProcessor() lowercase : int = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) # verify pixel values lowercase : int = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE__ ) lowercase : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) # verify area lowercase : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE__ ) ) # verify boxes lowercase : str = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) ) # verify image_id lowercase : Optional[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE__ ) ) # verify is_crowd lowercase : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE__ ) ) # verify class_labels lowercase : Dict = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE__ ) ) # verify orig_size lowercase : Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE__ ) ) # verify size lowercase : int = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE__ ) ) @slow def __lowerCamelCase ( self ): # prepare image, target and masks_path lowercase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: lowercase : int = json.loads(f.read() ) lowercase : Tuple = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} lowercase : Optional[int] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them lowercase : Optional[int] = DeformableDetrImageProcessor(format='''coco_panoptic''' ) lowercase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ , annotations=SCREAMING_SNAKE_CASE__ , masks_path=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ) # verify pixel values lowercase : Union[str, Any] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , SCREAMING_SNAKE_CASE__ ) lowercase : str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) # verify area lowercase : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , SCREAMING_SNAKE_CASE__ ) ) # verify boxes lowercase : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , SCREAMING_SNAKE_CASE__ ) lowercase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , SCREAMING_SNAKE_CASE__ , atol=1E-3 ) ) # verify image_id lowercase : int = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , SCREAMING_SNAKE_CASE__ ) ) # verify is_crowd lowercase : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , SCREAMING_SNAKE_CASE__ ) ) # verify class_labels lowercase : int = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , SCREAMING_SNAKE_CASE__ ) ) # verify masks lowercase : Dict = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , SCREAMING_SNAKE_CASE__ ) # verify orig_size lowercase : Optional[Any] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , SCREAMING_SNAKE_CASE__ ) ) # verify size lowercase : str = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , SCREAMING_SNAKE_CASE__ ) )
337
def __lowercase ( _UpperCamelCase = 4000000 ) ->int: """simple docstring""" lowercase : int = [] lowercase , lowercase : str = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCamelCase ) lowercase , lowercase : Dict = b, a + b return sum(_UpperCamelCase ) if __name__ == "__main__": print(F'''{solution() = }''')
337
1
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger() def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = True ) ->List[Any]: """simple docstring""" print(f"""Converting {name}...""" ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": lowercase : Union[str, Any] = timm.create_model('''levit_128s''', pretrained=_UpperCamelCase ) else: lowercase : Optional[int] = timm.create_model('''levit_128''', pretrained=_UpperCamelCase ) if hidden_sizes == 192: lowercase : Tuple = timm.create_model('''levit_192''', pretrained=_UpperCamelCase ) if hidden_sizes == 256: lowercase : str = timm.create_model('''levit_256''', pretrained=_UpperCamelCase ) if hidden_sizes == 384: lowercase : Optional[Any] = timm.create_model('''levit_384''', pretrained=_UpperCamelCase ) from_model.eval() lowercase : Optional[Any] = LevitForImageClassificationWithTeacher(_UpperCamelCase ).eval() lowercase : Dict = OrderedDict() lowercase : List[Any] = from_model.state_dict() lowercase : List[str] = list(from_model.state_dict().keys() ) lowercase : List[str] = list(our_model.state_dict().keys() ) print(len(_UpperCamelCase ), len(_UpperCamelCase ) ) for i in range(len(_UpperCamelCase ) ): lowercase : Tuple = weights[og_keys[i]] our_model.load_state_dict(_UpperCamelCase ) lowercase : Tuple = torch.randn((2, 3, 224, 224) ) lowercase : Union[str, Any] = from_model(_UpperCamelCase ) lowercase : str = our_model(_UpperCamelCase ).logits assert torch.allclose(_UpperCamelCase, _UpperCamelCase ), "The model logits don't match the original one." lowercase : str = name print(_UpperCamelCase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowercase : Any = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f"""Pushed {checkpoint_name}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = True ) ->Any: """simple docstring""" lowercase : Dict = '''imagenet-1k-id2label.json''' lowercase : Any = 1000 lowercase : Tuple = (1, num_labels) lowercase : Dict = '''huggingface/label-files''' lowercase : int = num_labels lowercase : Any = json.load(open(hf_hub_download(_UpperCamelCase, _UpperCamelCase, repo_type='''dataset''' ), '''r''' ) ) lowercase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} lowercase : List[str] = idalabel lowercase : str = {v: k for k, v in idalabel.items()} lowercase : Union[str, Any] = partial(_UpperCamelCase, num_labels=_UpperCamelCase, idalabel=_UpperCamelCase, labelaid=_UpperCamelCase ) lowercase : str = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } lowercase : List[Any] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name], _UpperCamelCase, names_to_config[model_name], _UpperCamelCase, _UpperCamelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name], _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return config, expected_shape if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''levit-dump-folder/''', type=Path, required=False, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') parser.add_argument( '''--no-push_to_hub''', dest='''push_to_hub''', action='''store_false''', help='''Do not push model and image processor to the hub''', ) __a = parser.parse_args() __a = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
337
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'perceiver' def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Any = num_latents lowercase : Union[str, Any] = d_latents lowercase : str = d_model lowercase : int = num_blocks lowercase : str = num_self_attends_per_block lowercase : List[str] = num_self_attention_heads lowercase : List[str] = num_cross_attention_heads lowercase : int = qk_channels lowercase : List[Any] = v_channels lowercase : int = cross_attention_shape_for_attention lowercase : Tuple = self_attention_widening_factor lowercase : Dict = cross_attention_widening_factor lowercase : Any = hidden_act lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : Union[str, Any] = initializer_range lowercase : Any = layer_norm_eps lowercase : Any = use_query_residual # masked language modeling attributes lowercase : List[str] = vocab_size lowercase : Dict = max_position_embeddings # image classification attributes lowercase : int = image_size # flow attributes lowercase : List[Any] = train_size # multimodal autoencoding attributes lowercase : List[Any] = num_frames lowercase : Union[str, Any] = audio_samples_per_frame lowercase : int = samples_per_patch lowercase : Optional[int] = output_shape class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): if self.task == "multiple-choice": lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def __lowerCamelCase ( self ): return 1E-4 def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : str = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ ) # Generate dummy inputs according to compute batch and sequence lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''input_ids''' ) return inputs elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
337
1
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging __a = logging.get_logger(__name__) # pylint: disable=invalid-name class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=768 ): super().__init__(SCREAMING_SNAKE_CASE__ ) lowercase : int = proj_size lowercase : Dict = CLIPVisionModel(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = PaintByExampleMapper(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = nn.LayerNorm(config.hidden_size ) lowercase : Union[str, Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling lowercase : Tuple = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): lowercase : Any = self.model(pixel_values=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = clip_output.pooler_output lowercase : List[Any] = self.mapper(latent_states[:, None] ) lowercase : Any = self.final_layer_norm(SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = self.proj_out(SCREAMING_SNAKE_CASE__ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class __SCREAMING_SNAKE_CASE ( nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Dict = (config.num_hidden_layers + 1) // 5 lowercase : Dict = config.hidden_size lowercase : int = 1 lowercase : Optional[Any] = nn.ModuleList( [ BasicTransformerBlock(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation_fn='''gelu''' , attention_bias=SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ ) ] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): for block in self.blocks: lowercase : Union[str, Any] = block(SCREAMING_SNAKE_CASE__ ) return hidden_states
337
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def __lowercase ( _UpperCamelCase = 8 ) ->str: """simple docstring""" lowercase : List[str] = ascii_letters + digits + punctuation return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" i -= len(_UpperCamelCase ) lowercase : Dict = i // 3 lowercase : List[str] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowercase : Union[str, Any] = ( chars_incl + random(_UpperCamelCase, quotient + remainder ) + random(_UpperCamelCase, _UpperCamelCase ) + random(_UpperCamelCase, _UpperCamelCase ) ) lowercase : Union[str, Any] = list(_UpperCamelCase ) shuffle(_UpperCamelCase ) return "".join(_UpperCamelCase ) # random is a generalised function for letters, characters and numbers def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool: """simple docstring""" if len(_UpperCamelCase ) < min_length: # Your Password must be at least 8 characters long return False lowercase : str = any(char in ascii_uppercase for char in password ) lowercase : List[str] = any(char in ascii_lowercase for char in password ) lowercase : Dict = any(char in digits for char in password ) lowercase : Tuple = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def __lowercase ( ) ->Dict: """simple docstring""" lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() ) lowercase : Optional[Any] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''', password_generator(_UpperCamelCase ) ) print( '''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
337
1
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __a = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from __future__ import annotations __a = [] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" for i in range(len(_UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" if row >= len(_UpperCamelCase ): solution.append(_UpperCamelCase ) printboard(_UpperCamelCase ) print() return True for i in range(len(_UpperCamelCase ) ): if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : int = 1 solve(_UpperCamelCase, row + 1 ) lowercase : Tuple = 0 return False def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" for i in range(len(_UpperCamelCase ) ): for j in range(len(_UpperCamelCase ) ): if board[i][j] == 1: print('''Q''', end=''' ''' ) else: print('''.''', end=''' ''' ) print() # n=int(input("The no. of queens")) __a = 8 __a = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
337
1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): A : Tuple = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowercase : Any = VideoClassificationPipeline(model=SCREAMING_SNAKE_CASE__ , image_processor=SCREAMING_SNAKE_CASE__ , top_k=2 ) lowercase : List[Any] = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): for example in examples: lowercase : List[Any] = video_classifier(SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ {'''score''': ANY(SCREAMING_SNAKE_CASE__ ), '''label''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''score''': ANY(SCREAMING_SNAKE_CASE__ ), '''label''': ANY(SCREAMING_SNAKE_CASE__ )}, ] , ) @require_torch def __lowerCamelCase ( self ): lowercase : Optional[Any] = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowercase : Tuple = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowercase : List[str] = pipeline( '''video-classification''' , model=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ , frame_sampling_rate=4 ) lowercase : Optional[int] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowercase : Optional[int] = video_classifier(SCREAMING_SNAKE_CASE__ , top_k=2 ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}] , ) lowercase : str = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(SCREAMING_SNAKE_CASE__ , decimals=4 ) , [ [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5199, '''label''': '''LABEL_0'''}, {'''score''': 0.4801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def __lowerCamelCase ( self ): pass
337
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
1
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = 3 lowercase : Tuple = 250 lowercase : List[str] = ids_tensor((batch_size, length) , SCREAMING_SNAKE_CASE__ ) lowercase : Any = torch.ones((batch_size, length) , device=SCREAMING_SNAKE_CASE__ , dtype=torch.float ) / length return input_ids, scores def __lowerCamelCase ( self ): lowercase , lowercase : Optional[Any] = self._get_tensors(5 ) lowercase : int = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase , lowercase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase , lowercase : Tuple = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) def __lowerCamelCase ( self ): lowercase : Tuple = MaxLengthCriteria(max_length=10 ) lowercase , lowercase : Tuple = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase , lowercase : Dict = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase , lowercase : List[str] = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowercase , lowercase : int = self._get_tensors(5 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase , lowercase : int = self._get_tensors(9 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase , lowercase : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase : Tuple = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __lowerCamelCase ( self ): lowercase , lowercase : List[str] = self._get_tensors(5 ) lowercase : str = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase : Tuple = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) def __lowerCamelCase ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(SCREAMING_SNAKE_CASE__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) lowercase : Any = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1 )
337
from collections.abc import Callable class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ = None ): # Stores actual heap items. lowercase : list = [] # Stores indexes of each item for supporting updates and deletion. lowercase : dict = {} # Stores current size of heap. lowercase : str = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return int((i - 1) / 2 ) if i > 0 else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : Dict = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. lowercase , lowercase : int = self.arr[j], self.arr[i] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.arr[i][1] < self.arr[j][1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = self._left(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = i if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = left if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = right return valid_parent def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ ) while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) while valid_parent != index: self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : str = self.pos_map[item] lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : List[str] = self.pos_map[item] del self.pos_map[item] lowercase : Optional[int] = self.arr[self.size - 1] lowercase : int = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] ) else: lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )] lowercase : str = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowerCamelCase ( self ): return self.arr[0] if self.size else None def __lowerCamelCase ( self ): lowercase : str = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __lowercase ( ) ->None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
337
1
from __future__ import annotations from dataclasses import dataclass @dataclass class __SCREAMING_SNAKE_CASE : A : float A : TreeNode | None = None A : TreeNode | None = None def __lowercase ( _UpperCamelCase ) ->bool: """simple docstring""" def is_valid_tree(_UpperCamelCase ) -> bool: if node is None: return True if not isinstance(_UpperCamelCase, _UpperCamelCase ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(_UpperCamelCase ): raise ValueError( '''Each node should be type of TreeNode and data should be float.''' ) def is_binary_search_tree_recursive_check( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left, _UpperCamelCase, node.data ) and is_binary_search_tree_recursive_check( node.right, node.data, _UpperCamelCase ) ) return is_binary_search_tree_recursive_check(_UpperCamelCase, -float('''inf''' ), float('''inf''' ) ) if __name__ == "__main__": import doctest doctest.testmod()
337
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
337
1
from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[int] = 'efficientnet' def __init__( self , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 600 , SCREAMING_SNAKE_CASE__ = 2.0 , SCREAMING_SNAKE_CASE__ = 3.1 , SCREAMING_SNAKE_CASE__ = 8 , SCREAMING_SNAKE_CASE__ = [3, 3, 5, 3, 5, 5, 3] , SCREAMING_SNAKE_CASE__ = [32, 16, 24, 40, 80, 112, 192] , SCREAMING_SNAKE_CASE__ = [16, 24, 40, 80, 112, 192, 320] , SCREAMING_SNAKE_CASE__ = [] , SCREAMING_SNAKE_CASE__ = [1, 2, 2, 2, 1, 2, 1] , SCREAMING_SNAKE_CASE__ = [1, 2, 2, 3, 3, 4, 1] , SCREAMING_SNAKE_CASE__ = [1, 6, 6, 6, 6, 6, 6] , SCREAMING_SNAKE_CASE__ = 0.25 , SCREAMING_SNAKE_CASE__ = "swish" , SCREAMING_SNAKE_CASE__ = 2560 , SCREAMING_SNAKE_CASE__ = "mean" , SCREAMING_SNAKE_CASE__ = 0.02 , SCREAMING_SNAKE_CASE__ = 0.001 , SCREAMING_SNAKE_CASE__ = 0.99 , SCREAMING_SNAKE_CASE__ = 0.5 , SCREAMING_SNAKE_CASE__ = 0.2 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = num_channels lowercase : Optional[Any] = image_size lowercase : int = width_coefficient lowercase : Union[str, Any] = depth_coefficient lowercase : Optional[int] = depth_divisor lowercase : Optional[Any] = kernel_sizes lowercase : int = in_channels lowercase : Union[str, Any] = out_channels lowercase : List[str] = depthwise_padding lowercase : Optional[int] = strides lowercase : Any = num_block_repeats lowercase : str = expand_ratios lowercase : Any = squeeze_expansion_ratio lowercase : List[Any] = hidden_act lowercase : Optional[int] = hidden_dim lowercase : List[Any] = pooling_type lowercase : List[str] = initializer_range lowercase : str = batch_norm_eps lowercase : Optional[int] = batch_norm_momentum lowercase : Any = dropout_rate lowercase : int = drop_connect_rate lowercase : Dict = sum(SCREAMING_SNAKE_CASE__ ) * 4 class __SCREAMING_SNAKE_CASE ( A__ ): A : List[Any] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-5
337
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __a = logging.get_logger(__name__) __a = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __a = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } __a = {'''facebook/blenderbot-3B''': 1_28} class __SCREAMING_SNAKE_CASE ( A__ ): A : Dict = VOCAB_FILES_NAMES A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[int] = ['input_ids', 'attention_mask'] A : str = BlenderbotTokenizer def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ): super().__init__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) ) lowercase : str = add_prefix_space lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = add_prefix_space lowercase : str = '''post_processor''' lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if tokenizer_component_instance: lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Tuple = tuple(state['''sep'''] ) if "cls" in state: lowercase : Union[str, Any] = tuple(state['''cls'''] ) lowercase : Optional[int] = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : Any = add_prefix_space lowercase : Tuple = True if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets: lowercase : List[str] = trim_offsets lowercase : Optional[int] = True if changes_to_apply: lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) ) lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __lowerCamelCase ( self ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value lowercase : Any = value def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Tuple = [self.sep_token_id] lowercase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): return token_ids_a + [self.eos_token_id] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ ) lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length: lowercase : Tuple = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
337
1
def __lowercase ( _UpperCamelCase = 10**12 ) ->int: """simple docstring""" lowercase : Tuple = 1 lowercase : List[str] = 0 lowercase : List[str] = 1 lowercase : List[Any] = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(F'''{solution() = }''')
337
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowercase ( ) ->int: """simple docstring""" lowercase : Tuple = HfArgumentParser(_UpperCamelCase ) lowercase : List[str] = parser.parse_args_into_dataclasses()[0] lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase ) try: lowercase : Any = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] ) lowercase : Any = '''''' lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] ) lowercase : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase ) raise ValueError(_UpperCamelCase ) benchmark.run() if __name__ == "__main__": main()
337
1
from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , ): super().__init__() self.register_modules(transformer=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ ) # create a imagenet -> id dictionary for easier use lowercase : Any = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(''',''' ): lowercase : List[str] = int(SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = dict(sorted(self.labels.items() ) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ ) for l in label: if l not in self.labels: raise ValueError( f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = 4.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 50 , SCREAMING_SNAKE_CASE__ = "pil" , SCREAMING_SNAKE_CASE__ = True , ): lowercase : int = len(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = self.transformer.config.sample_size lowercase : Optional[Any] = self.transformer.config.in_channels lowercase : Dict = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=SCREAMING_SNAKE_CASE__ , device=self.device , dtype=self.transformer.dtype , ) lowercase : Any = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents lowercase : Dict = torch.tensor(SCREAMING_SNAKE_CASE__ , device=self.device ).reshape(-1 ) lowercase : Any = torch.tensor([1000] * batch_size , device=self.device ) lowercase : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: lowercase : int = latent_model_input[: len(SCREAMING_SNAKE_CASE__ ) // 2] lowercase : List[Any] = torch.cat([half, half] , dim=0 ) lowercase : Any = self.scheduler.scale_model_input(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = t if not torch.is_tensor(SCREAMING_SNAKE_CASE__ ): # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) lowercase : Any = latent_model_input.device.type == '''mps''' if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = torch.floataa if is_mps else torch.floataa else: lowercase : Optional[Any] = torch.intaa if is_mps else torch.intaa lowercase : int = torch.tensor([timesteps] , dtype=SCREAMING_SNAKE_CASE__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: lowercase : Optional[Any] = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase : str = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output lowercase : Optional[Any] = self.transformer( SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , class_labels=SCREAMING_SNAKE_CASE__ ).sample # perform guidance if guidance_scale > 1: lowercase , lowercase : Tuple = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] lowercase , lowercase : Union[str, Any] = torch.split(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) // 2 , dim=0 ) lowercase : Tuple = uncond_eps + guidance_scale * (cond_eps - uncond_eps) lowercase : int = torch.cat([half_eps, half_eps] , dim=0 ) lowercase : Tuple = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: lowercase , lowercase : int = torch.split(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , dim=1 ) else: lowercase : Any = noise_pred # compute previous image: x_t -> x_t-1 lowercase : Dict = self.scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample if guidance_scale > 1: lowercase , lowercase : Optional[int] = latent_model_input.chunk(2 , dim=0 ) else: lowercase : int = latent_model_input lowercase : Any = 1 / self.vae.config.scaling_factor * latents lowercase : str = self.vae.decode(SCREAMING_SNAKE_CASE__ ).sample lowercase : List[str] = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowercase : Any = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": lowercase : Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
337
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [False] * len(_UpperCamelCase ) lowercase : Optional[int] = [] queue.append(_UpperCamelCase ) lowercase : Union[str, Any] = True while queue: lowercase : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) lowercase : Tuple = True lowercase : Optional[Any] = u return visited[t] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : List[str] = [-1] * (len(_UpperCamelCase )) lowercase : int = 0 while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : List[str] = float('''Inf''' ) lowercase : int = sink while s != source: # Find the minimum value in select path lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] ) lowercase : Union[str, Any] = parent[s] max_flow += path_flow lowercase : Optional[int] = sink while v != source: lowercase : Any = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Union[str, Any] = parent[v] return max_flow __a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __a , __a = 0, 5 print(ford_fulkerson(graph, source, sink))
337
1
def __lowercase ( _UpperCamelCase ) ->bool: """simple docstring""" lowercase : str = 0 for ch in input_str: lowercase : Dict = ord(_UpperCamelCase ) lowercase : str = pow(2, _UpperCamelCase ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
337
from typing import List from .keymap import KEYMAP, get_character def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += [key] setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator def __lowercase ( *_UpperCamelCase ) ->Any: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += keys setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator class __SCREAMING_SNAKE_CASE ( A__ ): def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ): setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} ) setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] ) for key in handled_keys: lowercase : List[Any] = value return new_cls @staticmethod def __lowerCamelCase ( cls ): lowercase : Dict = get_character() if char != KEYMAP["undefined"]: lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ ) if handler: lowercase : Tuple = char return handler(cls ) else: return None def __lowercase ( cls ) ->Any: """simple docstring""" return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
337
1
import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__="last" , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ): lowercase : List[str] = parent lowercase : List[Any] = batch_size lowercase : Optional[int] = seq_length lowercase : str = is_training lowercase : Tuple = use_input_lengths lowercase : Optional[Any] = use_token_type_ids lowercase : Optional[Any] = use_labels lowercase : Tuple = gelu_activation lowercase : Tuple = sinusoidal_embeddings lowercase : str = causal lowercase : Optional[int] = asm lowercase : Optional[Any] = n_langs lowercase : Optional[int] = vocab_size lowercase : List[Any] = n_special lowercase : Dict = hidden_size lowercase : List[Any] = num_hidden_layers lowercase : str = num_attention_heads lowercase : Tuple = hidden_dropout_prob lowercase : str = attention_probs_dropout_prob lowercase : Tuple = max_position_embeddings lowercase : Optional[Any] = type_vocab_size lowercase : Optional[int] = type_sequence_label_size lowercase : Union[str, Any] = initializer_range lowercase : int = num_labels lowercase : str = num_choices lowercase : str = summary_type lowercase : Optional[Any] = use_proj lowercase : List[Any] = scope def __lowerCamelCase ( self ): lowercase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : List[Any] = None if self.use_input_lengths: lowercase : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowercase : Optional[int] = None if self.use_token_type_ids: lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowercase : int = None lowercase : Optional[Any] = None lowercase : Optional[Any] = None if self.use_labels: lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float() lowercase : Any = ids_tensor([self.batch_size] , self.num_choices ) lowercase : Union[str, Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __lowerCamelCase ( self ): return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : Optional[int] = FlaubertModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , lengths=SCREAMING_SNAKE_CASE__ , langs=SCREAMING_SNAKE_CASE__ ) lowercase : Any = model(SCREAMING_SNAKE_CASE__ , langs=SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : List[str] = FlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : Dict = FlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Any = model(SCREAMING_SNAKE_CASE__ ) lowercase : Any = model(SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : Union[str, Any] = FlaubertForQuestionAnswering(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = model( SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , cls_index=SCREAMING_SNAKE_CASE__ , is_impossible=SCREAMING_SNAKE_CASE__ , p_mask=SCREAMING_SNAKE_CASE__ , ) lowercase : int = model( SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ , cls_index=SCREAMING_SNAKE_CASE__ , is_impossible=SCREAMING_SNAKE_CASE__ , ) ((lowercase) , ) : Optional[int] = result_with_labels.to_tuple() lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ , start_positions=SCREAMING_SNAKE_CASE__ , end_positions=SCREAMING_SNAKE_CASE__ ) ((lowercase) , ) : List[str] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : Any = FlaubertForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Any = model(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : Union[str, Any] = self.num_labels lowercase : List[Any] = FlaubertForTokenClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : Any = self.num_choices lowercase : int = FlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase : Optional[Any] = model( SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): lowercase : List[str] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Tuple = config_and_inputs lowercase : Any = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): A : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) A : Optional[int] = ( { 'feature-extraction': FlaubertModel, 'fill-mask': FlaubertWithLMHeadModel, 'question-answering': FlaubertForQuestionAnsweringSimple, 'text-classification': FlaubertForSequenceClassification, 'token-classification': FlaubertForTokenClassification, 'zero-shot': FlaubertForSequenceClassification, } if is_torch_available() else {} ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): lowercase : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowercase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) lowercase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def __lowerCamelCase ( self ): lowercase : str = FlaubertModelTester(self ) lowercase : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , emb_dim=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*SCREAMING_SNAKE_CASE__ ) @slow def __lowerCamelCase ( self ): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Union[str, Any] = FlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @slow @require_torch_gpu def __lowerCamelCase ( self ): lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowercase : Dict = True lowercase : str = model_class(config=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = torch.jit.trace( SCREAMING_SNAKE_CASE__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , '''traced_model.pt''' ) ) lowercase : Optional[Any] = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE__ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE__ ) loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE__ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE__ ) ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCamelCase ( self ): lowercase : Tuple = FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' ) lowercase : Optional[int] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )[0] lowercase : Any = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ ) lowercase : str = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) )
337
import logging import os from .state import PartialState class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ): if self._should_log(SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) elif in_order: lowercase : List[Any] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) state.wait_for_everyone() def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]: """simple docstring""" if log_level is None: lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase ) lowercase : str = logging.getLogger(_UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_UpperCamelCase, {} )
337
1
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __a = { '''sample_size''': 32, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': 10_00, '''block_out_channels''': [32, 64], '''attention_head_dim''': 8, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __a = { '''sample_size''': 64, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 3, '''num_class_embeds''': 10_00, '''block_out_channels''': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''scale_shift''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __a = { '''sample_size''': 2_56, '''in_channels''': 3, '''out_channels''': 3, '''layers_per_block''': 2, '''num_class_embeds''': None, '''block_out_channels''': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4], '''attention_head_dim''': 64, '''down_block_types''': [ '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''ResnetDownsampleBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', '''AttnDownBlock2D''', ], '''up_block_types''': [ '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''AttnUpBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', '''ResnetUpsampleBlock2D''', ], '''resnet_time_scale_shift''': '''default''', '''upsample_type''': '''resnet''', '''downsample_type''': '''resnet''', } __a = { '''num_train_timesteps''': 40, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } __a = { '''num_train_timesteps''': 2_01, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } __a = { '''num_train_timesteps''': 1_51, '''sigma_min''': 0.0_0_2, '''sigma_max''': 8_0.0, } def __lowercase ( _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" if isinstance(_UpperCamelCase, _UpperCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=False ) ->Any: """simple docstring""" lowercase : Any = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] lowercase : List[str] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] lowercase : str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] lowercase : Dict = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] lowercase : Optional[int] = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] lowercase : Dict = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] lowercase : Optional[int] = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] lowercase : int = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] lowercase : Union[str, Any] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] lowercase : Optional[Any] = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: lowercase : int = checkpoint[f"""{old_prefix}.skip_connection.weight"""] lowercase : List[Any] = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=None ) ->Dict: """simple docstring""" lowercase , lowercase , lowercase : Optional[int] = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3, dim=0 ) lowercase , lowercase , lowercase : str = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3, dim=0 ) lowercase : Dict = checkpoint[f"""{old_prefix}.norm.weight"""] lowercase : List[Any] = checkpoint[f"""{old_prefix}.norm.bias"""] lowercase : Union[str, Any] = weight_q.squeeze(-1 ).squeeze(-1 ) lowercase : Optional[int] = bias_q.squeeze(-1 ).squeeze(-1 ) lowercase : Dict = weight_k.squeeze(-1 ).squeeze(-1 ) lowercase : Dict = bias_k.squeeze(-1 ).squeeze(-1 ) lowercase : List[Any] = weight_v.squeeze(-1 ).squeeze(-1 ) lowercase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) lowercase : Optional[Any] = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) lowercase : Tuple = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" lowercase : Tuple = torch.load(_UpperCamelCase, map_location='''cpu''' ) lowercase : Any = {} lowercase : Tuple = checkpoint['''time_embed.0.weight'''] lowercase : Union[str, Any] = checkpoint['''time_embed.0.bias'''] lowercase : Any = checkpoint['''time_embed.2.weight'''] lowercase : Tuple = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: lowercase : List[str] = checkpoint['''label_emb.weight'''] lowercase : Optional[Any] = checkpoint['''input_blocks.0.0.weight'''] lowercase : Optional[Any] = checkpoint['''input_blocks.0.0.bias'''] lowercase : Dict = unet_config['''down_block_types'''] lowercase : List[str] = unet_config['''layers_per_block'''] lowercase : Dict = unet_config['''attention_head_dim'''] lowercase : str = unet_config['''block_out_channels'''] lowercase : int = 1 lowercase : Tuple = channels_list[0] for i, layer_type in enumerate(_UpperCamelCase ): lowercase : int = channels_list[i] lowercase : Any = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(_UpperCamelCase ): lowercase : Optional[int] = f"""down_blocks.{i}.resnets.{j}""" lowercase : List[str] = f"""input_blocks.{current_layer}.0""" lowercase : Optional[Any] = True if j == 0 and downsample_block_has_skip else False lowercase : List[Any] = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, has_skip=_UpperCamelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(_UpperCamelCase ): lowercase : Optional[int] = f"""down_blocks.{i}.resnets.{j}""" lowercase : Optional[Any] = f"""input_blocks.{current_layer}.0""" lowercase : Optional[int] = True if j == 0 and downsample_block_has_skip else False lowercase : str = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, has_skip=_UpperCamelCase ) lowercase : Tuple = f"""down_blocks.{i}.attentions.{j}""" lowercase : int = f"""input_blocks.{current_layer}.1""" lowercase : str = convert_attention( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) current_layer += 1 if i != len(_UpperCamelCase ) - 1: lowercase : List[Any] = f"""down_blocks.{i}.downsamplers.0""" lowercase : List[Any] = f"""input_blocks.{current_layer}.0""" lowercase : Optional[Any] = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) current_layer += 1 lowercase : Union[str, Any] = current_channels # hardcoded the mid-block for now lowercase : Dict = '''mid_block.resnets.0''' lowercase : Tuple = '''middle_block.0''' lowercase : List[Any] = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : Optional[Any] = '''mid_block.attentions.0''' lowercase : str = '''middle_block.1''' lowercase : str = convert_attention(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : List[str] = '''mid_block.resnets.1''' lowercase : List[str] = '''middle_block.2''' lowercase : Optional[int] = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : Tuple = 0 lowercase : int = unet_config['''up_block_types'''] for i, layer_type in enumerate(_UpperCamelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): lowercase : Union[str, Any] = f"""up_blocks.{i}.resnets.{j}""" lowercase : Tuple = f"""output_blocks.{current_layer}.0""" lowercase : Dict = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, has_skip=_UpperCamelCase ) current_layer += 1 if i != len(_UpperCamelCase ) - 1: lowercase : List[str] = f"""up_blocks.{i}.upsamplers.0""" lowercase : int = f"""output_blocks.{current_layer-1}.1""" lowercase : str = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): lowercase : List[str] = f"""up_blocks.{i}.resnets.{j}""" lowercase : str = f"""output_blocks.{current_layer}.0""" lowercase : Dict = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, has_skip=_UpperCamelCase ) lowercase : List[str] = f"""up_blocks.{i}.attentions.{j}""" lowercase : Union[str, Any] = f"""output_blocks.{current_layer}.1""" lowercase : Optional[int] = convert_attention( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) current_layer += 1 if i != len(_UpperCamelCase ) - 1: lowercase : Union[str, Any] = f"""up_blocks.{i}.upsamplers.0""" lowercase : int = f"""output_blocks.{current_layer-1}.2""" lowercase : Optional[int] = convert_resnet(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : int = checkpoint['''out.0.weight'''] lowercase : List[Any] = checkpoint['''out.0.bias'''] lowercase : List[str] = checkpoint['''out.2.weight'''] lowercase : Union[str, Any] = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''') parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.''' ) parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''') __a = parser.parse_args() __a = strabool(args.class_cond) __a = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: __a = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __a = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __a = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: __a = None __a = con_pt_to_diffuser(args.unet_path, unet_config) __a = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __a = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __a = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __a = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') __a = CMStochasticIterativeScheduler(**scheduler_config) __a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
337
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class __SCREAMING_SNAKE_CASE ( pl.LightningModule ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Any = model lowercase : Optional[Any] = 2 lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __lowerCamelCase ( self ): pass def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase ) lowercase : int = LightningModel(_UpperCamelCase ) lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
337
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __a = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['''ViTFeatureExtractor'''] __a = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __SCREAMING_SNAKE_CASE ( A__ ): A : Any = 'yolos' def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = hidden_size lowercase : int = num_hidden_layers lowercase : str = num_attention_heads lowercase : str = intermediate_size lowercase : Dict = hidden_act lowercase : int = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[Any] = initializer_range lowercase : Optional[int] = layer_norm_eps lowercase : str = image_size lowercase : Dict = patch_size lowercase : str = num_channels lowercase : Optional[int] = qkv_bias lowercase : List[str] = num_detection_tokens lowercase : List[str] = use_mid_position_embeddings lowercase : Dict = auxiliary_loss # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : Any = bbox_cost lowercase : int = giou_cost # Loss coefficients lowercase : Dict = bbox_loss_coefficient lowercase : Optional[Any] = giou_loss_coefficient lowercase : Tuple = eos_coefficient class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return 12
337
1
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.17.0.dev0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''') __a = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : A : Optional[str] = field( default='tab_fact' , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) A : Optional[str] = field( default='tab_fact' , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} , ) A : int = field( default=1024 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A : bool = field( default=A__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) A : bool = field( default=A__ , metadata={ 'help': ( 'Whether to pad all samples to `max_seq_length`. ' 'If False, will pad the samples dynamically when batching to the maximum length in the batch.' ) } , ) A : Optional[int] = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) A : Optional[int] = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) A : Optional[int] = field( default=A__ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of prediction examples to this ' 'value if set.' ) } , ) A : Optional[str] = field( default=A__ , metadata={'help': 'A csv or a json file containing the training data.'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'A csv or a json file containing the validation data.'} ) A : Optional[str] = field(default=A__ , metadata={'help': 'A csv or a json file containing the test data.'} ) def __lowerCamelCase ( self ): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError('''Need either a GLUE task, a training/validation file or a dataset name.''' ) else: lowercase : Tuple = self.train_file.split('''.''' )[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." lowercase : List[Any] = self.validation_file.split('''.''' )[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class __SCREAMING_SNAKE_CASE : A : str = field( default=A__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) A : bool = field( default=A__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , ) A : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) A : bool = field( default=A__ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) def __lowercase ( ) ->Any: """simple docstring""" lowercase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase : Dict = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], ) lowercase : Optional[Any] = training_args.get_process_log_level() logger.setLevel(_UpperCamelCase ) datasets.utils.logging.set_verbosity(_UpperCamelCase ) transformers.utils.logging.set_verbosity(_UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. lowercase : Optional[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. lowercase : Union[str, Any] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. lowercase : Any = {'''train''': data_args.train_file, '''validation''': data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: lowercase : Any = data_args.train_file.split('''.''' )[-1] lowercase : List[Any] = data_args.test_file.split('''.''' )[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." lowercase : str = data_args.test_file else: raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' ) for key in data_files.keys(): logger.info(f"""load a local file for {key}: {data_files[key]}""" ) if data_args.train_file.endswith('''.csv''' ): # Loading a dataset from local csv files lowercase : str = load_dataset('''csv''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir ) else: # Loading a dataset from local json files lowercase : Tuple = load_dataset('''json''', data_files=_UpperCamelCase, cache_dir=model_args.cache_dir ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels lowercase : List[Any] = raw_datasets['''train'''].features['''label'''].names lowercase : Optional[Any] = len(_UpperCamelCase ) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # load tapex tokenizer lowercase : Optional[Any] = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=_UpperCamelCase, ) lowercase : str = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Padding strategy if data_args.pad_to_max_length: lowercase : Dict = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch lowercase : List[str] = False # Some models have set the order of the labels to use, so let's make sure we do use it. lowercase : Union[str, Any] = {'''Refused''': 0, '''Entailed''': 1} lowercase : int = {0: '''Refused''', 1: '''Entailed'''} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) lowercase : int = min(data_args.max_seq_length, tokenizer.model_max_length ) def preprocess_tabfact_function(_UpperCamelCase ): # Tokenize the texts def _convert_table_text_to_pandas(_UpperCamelCase ): lowercase : List[str] = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )] lowercase : int = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0] ) return _table_pd lowercase : Union[str, Any] = examples['''statement'''] lowercase : Any = list(map(_convert_table_text_to_pandas, examples['''table_text'''] ) ) lowercase : str = tokenizer(_UpperCamelCase, _UpperCamelCase, padding=_UpperCamelCase, max_length=_UpperCamelCase, truncation=_UpperCamelCase ) lowercase : Dict = examples['''label'''] return result with training_args.main_process_first(desc='''dataset map pre-processing''' ): lowercase : int = raw_datasets.map( _UpperCamelCase, batched=_UpperCamelCase, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on dataset''', ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError('''--do_train requires a train dataset''' ) lowercase : Tuple = raw_datasets['''train'''] if data_args.max_train_samples is not None: lowercase : Dict = train_dataset.select(range(data_args.max_train_samples ) ) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError('''--do_eval requires a validation dataset''' ) lowercase : Tuple = raw_datasets['''validation'''] if data_args.max_eval_samples is not None: lowercase : Dict = eval_dataset.select(range(data_args.max_eval_samples ) ) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError('''--do_predict requires a test dataset''' ) lowercase : List[Any] = raw_datasets['''test'''] if data_args.max_predict_samples is not None: lowercase : int = predict_dataset.select(range(data_args.max_predict_samples ) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(_UpperCamelCase ) ), 3 ): logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_UpperCamelCase ): lowercase : List[str] = p.predictions[0] if isinstance(p.predictions, _UpperCamelCase ) else p.predictions lowercase : Tuple = np.argmax(_UpperCamelCase, axis=1 ) return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: lowercase : Optional[Any] = default_data_collator elif training_args.fpaa: lowercase : List[Any] = DataCollatorWithPadding(_UpperCamelCase, pad_to_multiple_of=8 ) else: lowercase : int = None # Initialize our Trainer lowercase : int = Trainer( model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=_UpperCamelCase, tokenizer=_UpperCamelCase, data_collator=_UpperCamelCase, ) # Training if training_args.do_train: lowercase : str = None if training_args.resume_from_checkpoint is not None: lowercase : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase : int = last_checkpoint lowercase : Union[str, Any] = trainer.train(resume_from_checkpoint=_UpperCamelCase ) lowercase : Optional[Any] = train_result.metrics lowercase : Optional[Any] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(_UpperCamelCase ) ) lowercase : Union[str, Any] = min(_UpperCamelCase, len(_UpperCamelCase ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''', _UpperCamelCase ) trainer.save_metrics('''train''', _UpperCamelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) lowercase : Optional[int] = trainer.evaluate(eval_dataset=_UpperCamelCase ) lowercase : int = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_UpperCamelCase ) lowercase : List[Any] = min(_UpperCamelCase, len(_UpperCamelCase ) ) trainer.log_metrics('''eval''', _UpperCamelCase ) trainer.save_metrics('''eval''', _UpperCamelCase ) if training_args.do_predict: logger.info('''*** Predict ***''' ) # Removing the `label` columns because it contains -1 and Trainer won't like that. lowercase : Optional[Any] = predict_dataset.remove_columns('''label''' ) lowercase : List[str] = trainer.predict(_UpperCamelCase, metric_key_prefix='''predict''' ).predictions lowercase : str = np.argmax(_UpperCamelCase, axis=1 ) lowercase : Optional[Any] = os.path.join(training_args.output_dir, '''predict_results_tabfact.txt''' ) if trainer.is_world_process_zero(): with open(_UpperCamelCase, '''w''' ) as writer: logger.info('''***** Predict Results *****''' ) writer.write('''index\tprediction\n''' ) for index, item in enumerate(_UpperCamelCase ): lowercase : Optional[Any] = label_list[item] writer.write(f"""{index}\t{item}\n""" ) lowercase : List[Any] = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''} if training_args.push_to_hub: trainer.push_to_hub(**_UpperCamelCase ) else: trainer.create_model_card(**_UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->Tuple: """simple docstring""" main() if __name__ == "__main__": main()
337
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __a = { '''<''': operator.lt, '''<=''': operator.le, '''==''': operator.eq, '''!=''': operator.ne, '''>=''': operator.ge, '''>''': operator.gt, } def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider""" f""" reinstalling {pkg}.""" ) if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ): raise ImportError( f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None: """simple docstring""" lowercase : List[Any] = f"""\n{hint}""" if hint is not None else '''''' # non-versioned check if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ): lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None else: lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f""" got {requirement}""" ) lowercase , lowercase : str = match[0] lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements lowercase : List[Any] = {} for w in want_range: lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f""" but got {requirement}""" ) lowercase , lowercase : Optional[int] = match[0] lowercase : Dict = want_ver if op not in ops: raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" ) # special case if pkg == "python": lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return # check if any version is installed try: lowercase : List[str] = importlib.metadata.version(_UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(_UpperCamelCase, _UpperCamelCase )
337
1
from __future__ import annotations from collections.abc import Callable def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 100, ) ->float: """simple docstring""" lowercase : Optional[int] = x_start lowercase : Optional[int] = fnc(_UpperCamelCase ) lowercase : List[str] = 0.0 for _ in range(_UpperCamelCase ): # Approximates small segments of curve as linear and solve # for trapezoidal area lowercase : List[Any] = (x_end - x_start) / steps + xa lowercase : str = fnc(_UpperCamelCase ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step lowercase : List[str] = xa lowercase : Optional[Any] = fxa return area if __name__ == "__main__": def __lowercase ( _UpperCamelCase ) ->Tuple: """simple docstring""" return x**3 + x**2 print('''f(x) = x^3 + x^2''') print('''The area between the curve, x = -5, x = 5 and the x axis is:''') __a = 10 while i <= 10_00_00: print(F'''with {i} steps: {trapezoidal_area(f, -5, 5, i)}''') i *= 10
337
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a = logging.get_logger(__name__) __a = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'deta' A : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = backbone_config.pop('''model_type''' ) lowercase : Any = CONFIG_MAPPING[backbone_model_type] lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = backbone_config lowercase : Union[str, Any] = num_queries lowercase : Any = max_position_embeddings lowercase : int = d_model lowercase : Any = encoder_ffn_dim lowercase : Optional[int] = encoder_layers lowercase : Tuple = encoder_attention_heads lowercase : Optional[Any] = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : int = decoder_attention_heads lowercase : Any = dropout lowercase : int = attention_dropout lowercase : Dict = activation_dropout lowercase : int = activation_function lowercase : Dict = init_std lowercase : List[str] = init_xavier_std lowercase : Optional[Any] = encoder_layerdrop lowercase : Tuple = auxiliary_loss lowercase : Tuple = position_embedding_type # deformable attributes lowercase : List[str] = num_feature_levels lowercase : Tuple = encoder_n_points lowercase : Optional[int] = decoder_n_points lowercase : Tuple = two_stage lowercase : Optional[Any] = two_stage_num_proposals lowercase : Union[str, Any] = with_box_refine lowercase : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : str = bbox_cost lowercase : List[Any] = giou_cost # Loss coefficients lowercase : Tuple = mask_loss_coefficient lowercase : Any = dice_loss_coefficient lowercase : Dict = bbox_loss_coefficient lowercase : Tuple = giou_loss_coefficient lowercase : Union[str, Any] = eos_coefficient lowercase : Tuple = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return self.encoder_attention_heads @property def __lowerCamelCase ( self ): return self.d_model def __lowerCamelCase ( self ): lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Any = self.backbone_config.to_dict() lowercase : List[str] = self.__class__.model_type return output
337
1
import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE__=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE__=[2, 3, 4] , SCREAMING_SNAKE_CASE__=None , ): lowercase : List[Any] = parent lowercase : Optional[Any] = batch_size lowercase : Dict = image_size lowercase : Optional[int] = num_channels lowercase : Optional[int] = num_stages lowercase : List[str] = hidden_sizes lowercase : Optional[int] = depths lowercase : Tuple = is_training lowercase : Tuple = use_labels lowercase : Any = intermediate_size lowercase : Any = hidden_act lowercase : Union[str, Any] = num_labels lowercase : Optional[int] = initializer_range lowercase : Union[str, Any] = out_features lowercase : str = out_indices lowercase : Any = scope def __lowerCamelCase ( self ): lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Optional[int] = None if self.use_labels: lowercase : int = ids_tensor([self.batch_size] , self.num_labels ) lowercase : str = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = ConvNextModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Dict = model(SCREAMING_SNAKE_CASE__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Union[str, Any] = ConvNextForImageClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = ConvNextBackbone(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Any = model(SCREAMING_SNAKE_CASE__ ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowercase : str = None lowercase : List[str] = ConvNextBackbone(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __lowerCamelCase ( self ): lowercase : Optional[Any] = self.prepare_config_and_inputs() lowercase , lowercase , lowercase : Any = config_and_inputs lowercase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): A : Optional[int] = ( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) A : int = ( {'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification} if is_torch_available() else {} ) A : Optional[int] = True A : List[str] = False A : int = False A : Any = False A : List[str] = False def __lowerCamelCase ( self ): lowercase : int = ConvNextModelTester(self ) lowercase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , has_text_modality=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __lowerCamelCase ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCamelCase ( self ): return @unittest.skip(reason='''ConvNext does not use inputs_embeds''' ) def __lowerCamelCase ( self ): pass @unittest.skip(reason='''ConvNext does not support input and output embeddings''' ) def __lowerCamelCase ( self ): pass @unittest.skip(reason='''ConvNext does not use feedforward chunking''' ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Any = model_class(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : Any = [*signature.parameters.keys()] lowercase : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): def check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = model_class(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() with torch.no_grad(): lowercase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) lowercase : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : int = self.model_tester.num_stages self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Dict = True check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : str = True check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__ ) @slow def __lowerCamelCase ( self ): for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[str] = ConvNextModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) def __lowercase ( ) ->List[str]: """simple docstring""" lowercase : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @cached_property def __lowerCamelCase ( self ): return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None @slow def __lowerCamelCase ( self ): lowercase : Optional[int] = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(SCREAMING_SNAKE_CASE__ ) lowercase : int = self.default_image_processor lowercase : Tuple = prepare_img() lowercase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE__ ) # forward pass with torch.no_grad(): lowercase : Optional[Any] = model(**SCREAMING_SNAKE_CASE__ ) # verify the logits lowercase : Union[str, Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(SCREAMING_SNAKE_CASE__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ) ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase , A__ ): A : Union[str, Any] = (ConvNextBackbone,) if is_torch_available() else () A : int = ConvNextConfig A : Dict = False def __lowerCamelCase ( self ): lowercase : Any = ConvNextModelTester(self )
337
def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : Union[str, Any] = 0 for i in range(1, 1001 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
337
1
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [False] * len(_UpperCamelCase ) lowercase : Optional[int] = [] queue.append(_UpperCamelCase ) lowercase : Union[str, Any] = True while queue: lowercase : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) lowercase : Tuple = True lowercase : Optional[Any] = u return visited[t] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : List[str] = [-1] * (len(_UpperCamelCase )) lowercase : int = 0 while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : List[str] = float('''Inf''' ) lowercase : int = sink while s != source: # Find the minimum value in select path lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] ) lowercase : Union[str, Any] = parent[s] max_flow += path_flow lowercase : Optional[int] = sink while v != source: lowercase : Any = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Union[str, Any] = parent[v] return max_flow __a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __a , __a = 0, 5 print(ford_fulkerson(graph, source, sink))
337
import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __a = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) lowercase : Any = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCamelCase ( self ): lowercase : List[Any] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with a really long name lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
337
1
import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : str = OmegaConf.load(_UpperCamelCase ) lowercase : Tuple = torch.load(_UpperCamelCase, map_location='''cpu''' )['''model'''] lowercase : Dict = list(state_dict.keys() ) # extract state_dict for VQVAE lowercase : int = {} lowercase : Any = '''first_stage_model.''' for key in keys: if key.startswith(_UpperCamelCase ): lowercase : int = state_dict[key] # extract state_dict for UNetLDM lowercase : Union[str, Any] = {} lowercase : Tuple = '''model.diffusion_model.''' for key in keys: if key.startswith(_UpperCamelCase ): lowercase : Any = state_dict[key] lowercase : List[Any] = config.model.params.first_stage_config.params lowercase : List[str] = config.model.params.unet_config.params lowercase : Optional[Any] = VQModel(**_UpperCamelCase ).eval() vqvae.load_state_dict(_UpperCamelCase ) lowercase : Dict = UNetLDMModel(**_UpperCamelCase ).eval() unet.load_state_dict(_UpperCamelCase ) lowercase : str = DDIMScheduler( timesteps=config.model.params.timesteps, beta_schedule='''scaled_linear''', beta_start=config.model.params.linear_start, beta_end=config.model.params.linear_end, clip_sample=_UpperCamelCase, ) lowercase : List[str] = LDMPipeline(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) pipeline.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', type=str, required=True) parser.add_argument('''--config_path''', type=str, required=True) parser.add_argument('''--output_path''', type=str, required=True) __a = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
337
import math class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1 lowercase : List[Any] = n lowercase : List[Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # adjacency matrix for weight lowercase : Union[str, Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # dp[i][j] stores minimum distance from i to j def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = w def __lowerCamelCase ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
337
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging __a = logging.get_logger(__name__) __a = {'''vocab_file''': '''spiece.model'''} __a = { '''vocab_file''': { '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''', } } class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<sep>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<cls>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=["<eop>", "<eod>"] , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): lowercase : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token lowercase : Any = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=SCREAMING_SNAKE_CASE__ , remove_space=SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , additional_special_tokens=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , ) lowercase : Union[str, Any] = 3 lowercase : Optional[Any] = do_lower_case lowercase : List[Any] = remove_space lowercase : List[str] = keep_accents lowercase : Tuple = vocab_file lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(SCREAMING_SNAKE_CASE__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( '''You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ''' '''See https://pypi.org/project/jieba/ for installation.''' ) lowercase : Tuple = jieba lowercase : Optional[Any] = str.maketrans(''' \n''' , '''\u2582\u2583''' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def __lowerCamelCase ( self ): return len(self.sp_model ) def __lowerCamelCase ( self ): lowercase : int = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ): lowercase : Tuple = self.__dict__.copy() lowercase : Any = None return state def __setstate__( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): lowercase : int = {} lowercase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if self.remove_space: lowercase : int = ''' '''.join(inputs.strip().split() ) else: lowercase : Any = inputs lowercase : List[Any] = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' ) if not self.keep_accents: lowercase : str = unicodedata.normalize('''NFKD''' , SCREAMING_SNAKE_CASE__ ) lowercase : str = ''''''.join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE__ )] ) if self.do_lower_case: lowercase : Union[str, Any] = outputs.lower() return outputs def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = self.preprocess_text(SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ ) lowercase : Any = [] for piece in pieces: if len(SCREAMING_SNAKE_CASE__ ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit(): lowercase : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE__ , '''''' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase : List[str] = cur_pieces[1:] else: lowercase : Dict = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(SCREAMING_SNAKE_CASE__ ) else: new_pieces.append(SCREAMING_SNAKE_CASE__ ) return new_pieces def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = ''''''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ''' ''' ).strip() return out_string def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : int = [self.sep_token_id] lowercase : str = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ ) if token_ids_a is not None: return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] return ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Union[str, Any] = [self.sep_token_id] lowercase : Tuple = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ ) elif not os.path.isfile(self.vocab_file ): with open(SCREAMING_SNAKE_CASE__ , '''wb''' ) as fi: lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE__ ) return (out_vocab_file,) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = super()._decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) lowercase : str = text.replace(''' ''' , '''''' ).replace('''\u2582''' , ''' ''' ).replace('''\u2583''' , '''\n''' ) return text
337
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(_UpperCamelCase ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''facebook/dpr-ctx_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-reader-single-nq-base''': ( '''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json''' ), '''facebook/dpr-ctx_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-question_encoder-multiset-base''': ( '''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json''' ), '''facebook/dpr-reader-multiset-base''': ( '''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json''' ), } class __SCREAMING_SNAKE_CASE ( A__ ): A : str = 'dpr' def __init__( self , SCREAMING_SNAKE_CASE__=30522 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__ = 0 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) lowercase : str = vocab_size lowercase : Dict = hidden_size lowercase : Dict = num_hidden_layers lowercase : str = num_attention_heads lowercase : Optional[Any] = hidden_act lowercase : Any = intermediate_size lowercase : Any = hidden_dropout_prob lowercase : Union[str, Any] = attention_probs_dropout_prob lowercase : List[Any] = max_position_embeddings lowercase : str = type_vocab_size lowercase : Optional[Any] = initializer_range lowercase : Optional[Any] = layer_norm_eps lowercase : str = projection_dim lowercase : Dict = position_embedding_type
337
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
337
1
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''nvidia/segformer-b0-finetuned-ade-512-512''': ( '''https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json''' ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class __SCREAMING_SNAKE_CASE ( A__ ): A : int = 'segformer' def __init__( self , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=[2, 2, 2, 2] , SCREAMING_SNAKE_CASE__=[8, 4, 2, 1] , SCREAMING_SNAKE_CASE__=[32, 64, 160, 256] , SCREAMING_SNAKE_CASE__=[7, 3, 3, 3] , SCREAMING_SNAKE_CASE__=[4, 2, 2, 2] , SCREAMING_SNAKE_CASE__=[1, 2, 5, 8] , SCREAMING_SNAKE_CASE__=[4, 4, 4, 4] , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=1E-6 , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=255 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( '''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be''' ''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , SCREAMING_SNAKE_CASE__ , ) lowercase : Any = num_channels lowercase : List[str] = num_encoder_blocks lowercase : Union[str, Any] = depths lowercase : str = sr_ratios lowercase : List[Any] = hidden_sizes lowercase : int = patch_sizes lowercase : int = strides lowercase : int = mlp_ratios lowercase : str = num_attention_heads lowercase : str = hidden_act lowercase : List[Any] = hidden_dropout_prob lowercase : List[str] = attention_probs_dropout_prob lowercase : Optional[Any] = classifier_dropout_prob lowercase : Dict = initializer_range lowercase : List[Any] = drop_path_rate lowercase : str = layer_norm_eps lowercase : List[str] = decoder_hidden_size lowercase : Union[str, Any] = kwargs.get('''reshape_last_stage''' , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = semantic_loss_ignore_index class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[int] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return 12
337
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase ) ->List[int]: """simple docstring""" if isinstance(_UpperCamelCase, np.ndarray ): return list(tensor.shape ) lowercase : Optional[Any] = tf.shape(_UpperCamelCase ) if tensor.shape == tf.TensorShape(_UpperCamelCase ): return dynamic lowercase : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )] def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase : int = [1] * inputs.shape.rank lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis] lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase : List[str] = tf.nn.batch_normalization( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, ) return outputs def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase : Dict = tf.shape(_UpperCamelCase ) lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->tf.Tensor: """simple docstring""" if not isinstance(_UpperCamelCase, tf.Tensor ): lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase : str = ( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None: """simple docstring""" tf.debugging.assert_less( _UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ), ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowercase : Any = np.asarray(_UpperCamelCase ) lowercase : List[Any] = 1 lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCamelCase ): lowercase : Optional[int] = chunk_data else: lowercase : int = data def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" if name in group.attrs: lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]] else: lowercase : Optional[Any] = [] lowercase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" def _expand_single_ad_tensor(_UpperCamelCase ): if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCamelCase, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
337
1
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): A : int = MODEL_FOR_CAUSAL_LM_MAPPING A : Tuple = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __lowerCamelCase ( self ): lowercase : List[str] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowercase : Tuple = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowercase : int = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowercase : Optional[Any] = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ , num_return_sequences=2 , return_tensors=SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )}, ] , ) lowercase : Any = text_generator.model.config.eos_token_id lowercase : int = '''<pad>''' lowercase : Optional[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=SCREAMING_SNAKE_CASE__ , num_return_sequences=2 , batch_size=2 , return_tensors=SCREAMING_SNAKE_CASE__ , ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ [ {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )}, ], [ {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_token_ids''': ANY(SCREAMING_SNAKE_CASE__ )}, ], ] , ) @require_tf def __lowerCamelCase ( self ): lowercase : List[Any] = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowercase : str = text_generator('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowercase : Optional[int] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = TextGenerationPipeline(model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ ) return text_generator, ["This is a test", "Another test"] def __lowerCamelCase ( self ): lowercase : Union[str, Any] = '''Hello I believe in''' lowercase : Optional[int] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowercase : Optional[int] = text_generator(SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowercase : Tuple = text_generator(SCREAMING_SNAKE_CASE__ , stop_sequence=''' fe''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = text_generator.model lowercase : List[Any] = text_generator.tokenizer lowercase : Dict = text_generator('''This is a test''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowercase : Dict = text_generator('''This is a test''' , return_full_text=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowercase : Tuple = pipeline(task='''text-generation''' , model=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , return_full_text=SCREAMING_SNAKE_CASE__ ) lowercase : Any = text_generator('''This is a test''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowercase : Any = text_generator('''This is a test''' , return_full_text=SCREAMING_SNAKE_CASE__ ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowercase : int = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}], [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowercase : Dict = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}], [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}, {'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}], ] , ) with self.assertRaises(SCREAMING_SNAKE_CASE__ ): lowercase : Dict = text_generator('''test''' , return_full_text=SCREAMING_SNAKE_CASE__ , return_text=SCREAMING_SNAKE_CASE__ ) with self.assertRaises(SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = text_generator('''test''' , return_full_text=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) with self.assertRaises(SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = text_generator('''test''' , return_text=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowercase : Dict = text_generator('''''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , [{'''generated_text''': ANY(SCREAMING_SNAKE_CASE__ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowercase : Any = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowercase : Tuple = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowercase : Tuple = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(SCREAMING_SNAKE_CASE__ ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __lowerCamelCase ( self ): import torch # Classic `model_kwargs` lowercase : Union[str, Any] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase : List[str] = pipe('''This is a test''' ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) lowercase : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowercase : Tuple = pipe('''This is a test''' ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowercase : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowercase : Optional[int] = pipe('''This is a test''' ) self.assertEqual( SCREAMING_SNAKE_CASE__ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __lowerCamelCase ( self ): import torch lowercase : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __lowerCamelCase ( self ): import torch lowercase : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=SCREAMING_SNAKE_CASE__ , top_p=0.5 ) def __lowerCamelCase ( self ): lowercase : Dict = '''Hello world''' lowercase : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowercase : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowercase : Any = logging.get_logger('''transformers.generation.utils''' ) lowercase : Tuple = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: lowercase : Union[str, Any] = text_generator(SCREAMING_SNAKE_CASE__ , max_length=10 , max_new_tokens=1 ) self.assertIn(SCREAMING_SNAKE_CASE__ , cl.out ) # The user only sets one -> no warning with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: lowercase : Any = text_generator(SCREAMING_SNAKE_CASE__ , max_new_tokens=1 ) self.assertNotIn(SCREAMING_SNAKE_CASE__ , cl.out ) with CaptureLogger(SCREAMING_SNAKE_CASE__ ) as cl: lowercase : Tuple = text_generator(SCREAMING_SNAKE_CASE__ , max_length=10 ) self.assertNotIn(SCREAMING_SNAKE_CASE__ , cl.out )
337
def __lowercase ( _UpperCamelCase = 4000000 ) ->int: """simple docstring""" lowercase : int = [] lowercase , lowercase : str = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCamelCase ) lowercase , lowercase : Dict = b, a + b return sum(_UpperCamelCase ) if __name__ == "__main__": print(F'''{solution() = }''')
337
1
import math def __lowercase ( _UpperCamelCase ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(_UpperCamelCase ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __lowercase ( _UpperCamelCase = 10001 ) ->int: """simple docstring""" try: lowercase : List[Any] = int(_UpperCamelCase ) except (TypeError, ValueError): raise TypeError('''Parameter nth must be int or castable to int.''' ) from None if nth <= 0: raise ValueError('''Parameter nth must be greater than or equal to one.''' ) lowercase : list[int] = [] lowercase : Dict = 2 while len(_UpperCamelCase ) < nth: if is_prime(_UpperCamelCase ): primes.append(_UpperCamelCase ) num += 1 else: num += 1 return primes[len(_UpperCamelCase ) - 1] if __name__ == "__main__": print(F'''{solution() = }''')
337
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'perceiver' def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Any = num_latents lowercase : Union[str, Any] = d_latents lowercase : str = d_model lowercase : int = num_blocks lowercase : str = num_self_attends_per_block lowercase : List[str] = num_self_attention_heads lowercase : List[str] = num_cross_attention_heads lowercase : int = qk_channels lowercase : List[Any] = v_channels lowercase : int = cross_attention_shape_for_attention lowercase : Tuple = self_attention_widening_factor lowercase : Dict = cross_attention_widening_factor lowercase : Any = hidden_act lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : Union[str, Any] = initializer_range lowercase : Any = layer_norm_eps lowercase : Any = use_query_residual # masked language modeling attributes lowercase : List[str] = vocab_size lowercase : Dict = max_position_embeddings # image classification attributes lowercase : int = image_size # flow attributes lowercase : List[Any] = train_size # multimodal autoencoding attributes lowercase : List[Any] = num_frames lowercase : Union[str, Any] = audio_samples_per_frame lowercase : int = samples_per_patch lowercase : Optional[int] = output_shape class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): if self.task == "multiple-choice": lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def __lowerCamelCase ( self ): return 1E-4 def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : str = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ ) # Generate dummy inputs according to compute batch and sequence lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''input_ids''' ) return inputs elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
337
1
from collections.abc import Generator from math import sin def __lowercase ( _UpperCamelCase ) ->bytes: """simple docstring""" if len(_UpperCamelCase ) != 32: raise ValueError('''Input must be of length 32''' ) lowercase : Any = B'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __lowercase ( _UpperCamelCase ) ->bytes: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) lowercase : Any = format(_UpperCamelCase, '''08x''' )[-8:] lowercase : Optional[int] = B'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def __lowercase ( _UpperCamelCase ) ->bytes: """simple docstring""" lowercase : Optional[int] = B'''''' for char in message: bit_string += format(_UpperCamelCase, '''08b''' ).encode('''utf-8''' ) lowercase : Any = format(len(_UpperCamelCase ), '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_UpperCamelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __lowercase ( _UpperCamelCase ) ->Generator[list[int], None, None]: """simple docstring""" if len(_UpperCamelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0, len(_UpperCamelCase ), 512 ): lowercase : Union[str, Any] = bit_string[pos : pos + 512] lowercase : Dict = [] for i in range(0, 512, 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ), 2 ) ) yield block_words def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) lowercase : Dict = format(_UpperCamelCase, '''032b''' ) lowercase : Tuple = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(_UpperCamelCase, 2 ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" return (a + b) % 2**32 def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __lowercase ( _UpperCamelCase ) ->bytes: """simple docstring""" lowercase : Optional[int] = preprocess(_UpperCamelCase ) lowercase : List[Any] = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states lowercase : Tuple = 0X67_452_301 lowercase : Tuple = 0Xef_cda_b89 lowercase : Tuple = 0X98_bad_cfe lowercase : Optional[int] = 0X10_325_476 lowercase : List[Any] = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_UpperCamelCase ): lowercase : Tuple = aa lowercase : List[Any] = ba lowercase : Dict = ca lowercase : Dict = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f lowercase : int = d ^ (b & (c ^ d)) lowercase : Optional[Any] = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f lowercase : List[str] = c ^ (d & (b ^ c)) lowercase : List[str] = (5 * i + 1) % 16 elif i <= 47: lowercase : str = b ^ c ^ d lowercase : Optional[int] = (3 * i + 5) % 16 else: lowercase : Tuple = c ^ (b | not_aa(_UpperCamelCase )) lowercase : str = (7 * i) % 16 lowercase : Optional[int] = (f + a + added_consts[i] + block_words[g]) % 2**32 lowercase : Optional[Any] = d lowercase : Union[str, Any] = c lowercase : List[str] = b lowercase : str = sum_aa(_UpperCamelCase, left_rotate_aa(_UpperCamelCase, shift_amounts[i] ) ) # Add hashed chunk to running total lowercase : List[Any] = sum_aa(_UpperCamelCase, _UpperCamelCase ) lowercase : Tuple = sum_aa(_UpperCamelCase, _UpperCamelCase ) lowercase : str = sum_aa(_UpperCamelCase, _UpperCamelCase ) lowercase : Any = sum_aa(_UpperCamelCase, _UpperCamelCase ) lowercase : List[Any] = reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) + reformat_hex(_UpperCamelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
337
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def __lowercase ( _UpperCamelCase = 8 ) ->str: """simple docstring""" lowercase : List[str] = ascii_letters + digits + punctuation return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" i -= len(_UpperCamelCase ) lowercase : Dict = i // 3 lowercase : List[str] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowercase : Union[str, Any] = ( chars_incl + random(_UpperCamelCase, quotient + remainder ) + random(_UpperCamelCase, _UpperCamelCase ) + random(_UpperCamelCase, _UpperCamelCase ) ) lowercase : Union[str, Any] = list(_UpperCamelCase ) shuffle(_UpperCamelCase ) return "".join(_UpperCamelCase ) # random is a generalised function for letters, characters and numbers def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool: """simple docstring""" if len(_UpperCamelCase ) < min_length: # Your Password must be at least 8 characters long return False lowercase : str = any(char in ascii_uppercase for char in password ) lowercase : List[str] = any(char in ascii_lowercase for char in password ) lowercase : Dict = any(char in digits for char in password ) lowercase : Tuple = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def __lowercase ( ) ->Dict: """simple docstring""" lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() ) lowercase : Optional[Any] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''', password_generator(_UpperCamelCase ) ) print( '''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
337
1
from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. __a = 10 def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" for i in range(_UpperCamelCase, _UpperCamelCase ): if array[i] == target: return i return -1 def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[Any] = 0 lowercase : List[Any] = len(_UpperCamelCase ) while left <= right: if right - left < precision: return lin_search(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : Union[str, Any] = (left + right) // 3 + 1 lowercase : Tuple = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: lowercase : Dict = one_third - 1 elif array[two_third] < target: lowercase : Union[str, Any] = two_third + 1 else: lowercase : Union[str, Any] = one_third + 1 lowercase : Optional[int] = two_third - 1 else: return -1 def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" if left < right: if right - left < precision: return lin_search(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : Optional[int] = (left + right) // 3 + 1 lowercase : List[Any] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(_UpperCamelCase, one_third - 1, _UpperCamelCase, _UpperCamelCase ) elif array[two_third] < target: return rec_ternary_search(two_third + 1, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) else: return rec_ternary_search(one_third + 1, two_third - 1, _UpperCamelCase, _UpperCamelCase ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() __a = input('''Enter numbers separated by comma:\n''').strip() __a = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), F"List must be ordered.\n{collection}." __a = int(input('''Enter the number to be found in the list:\n''').strip()) __a = ite_ternary_search(collection, target) __a = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F'''Iterative search: {target} found at positions: {resulta}''') print(F'''Recursive search: {target} found at positions: {resulta}''') else: print('''Not found''')
337
from __future__ import annotations __a = [] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" for i in range(len(_UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" if row >= len(_UpperCamelCase ): solution.append(_UpperCamelCase ) printboard(_UpperCamelCase ) print() return True for i in range(len(_UpperCamelCase ) ): if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : int = 1 solve(_UpperCamelCase, row + 1 ) lowercase : Tuple = 0 return False def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" for i in range(len(_UpperCamelCase ) ): for j in range(len(_UpperCamelCase ) ): if board[i][j] == 1: print('''Q''', end=''' ''' ) else: print('''.''', end=''' ''' ) print() # n=int(input("The no. of queens")) __a = 8 __a = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
337
1
from sklearn.metrics import matthews_corrcoef import datasets __a = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' __a = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' __a = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def __lowerCamelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): return { "matthews_correlation": float(matthews_corrcoef(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , sample_weight=SCREAMING_SNAKE_CASE__ ) ), }
337
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase, _UpperCamelCase=False ) ->Tuple: """simple docstring""" lowercase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowercase : Any = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=False ) ->Optional[Any]: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: lowercase : Optional[int] = '''''' else: lowercase : Optional[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase : Union[str, Any] = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) lowercase : Any = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase : List[str] = in_proj_bias[: config.hidden_size] lowercase : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase : Dict = in_proj_bias[-config.hidden_size :] def __lowercase ( _UpperCamelCase ) ->Any: """simple docstring""" lowercase : Tuple = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" lowercase : Tuple = dct.pop(_UpperCamelCase ) lowercase : List[Any] = val def __lowercase ( ) ->int: """simple docstring""" lowercase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase : str = Image.open(requests.get(_UpperCamelCase, stream=_UpperCamelCase ).raw ) return im @torch.no_grad() def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=True ) ->Any: """simple docstring""" lowercase : Dict = ViTConfig() # patch_size if model_name[-1] == "8": lowercase : Optional[int] = 8 # set labels if required if not base_model: lowercase : Union[str, Any] = 1000 lowercase : str = '''huggingface/label-files''' lowercase : Tuple = '''imagenet-1k-id2label.json''' lowercase : List[str] = json.load(open(hf_hub_download(_UpperCamelCase, _UpperCamelCase, repo_type='''dataset''' ), '''r''' ) ) lowercase : Tuple = {int(_UpperCamelCase ): v for k, v in idalabel.items()} lowercase : List[Any] = idalabel lowercase : Tuple = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: lowercase : List[Any] = 384 lowercase : List[Any] = 1536 lowercase : str = 12 lowercase : Union[str, Any] = 6 # load original model from torch hub lowercase : List[Any] = torch.hub.load('''facebookresearch/dino:main''', _UpperCamelCase ) original_model.eval() # load state_dict of original model, remove and rename some keys lowercase : Dict = original_model.state_dict() if base_model: remove_classification_head_(_UpperCamelCase ) lowercase : Optional[int] = create_rename_keys(_UpperCamelCase, base_model=_UpperCamelCase ) for src, dest in rename_keys: rename_key(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) read_in_q_k_v(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) # load HuggingFace model if base_model: lowercase : Optional[Any] = ViTModel(_UpperCamelCase, add_pooling_layer=_UpperCamelCase ).eval() else: lowercase : Any = ViTForImageClassification(_UpperCamelCase ).eval() model.load_state_dict(_UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor lowercase : List[str] = ViTImageProcessor() lowercase : Optional[int] = image_processor(images=prepare_img(), return_tensors='''pt''' ) lowercase : Union[str, Any] = encoding['''pixel_values'''] lowercase : int = model(_UpperCamelCase ) if base_model: lowercase : int = original_model(_UpperCamelCase ) assert torch.allclose(_UpperCamelCase, outputs.last_hidden_state[:, 0, :], atol=1e-1 ) else: lowercase : Union[str, Any] = original_model(_UpperCamelCase ) assert logits.shape == outputs.logits.shape assert torch.allclose(_UpperCamelCase, outputs.logits, atol=1e-3 ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_UpperCamelCase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''dino_vitb16''', type=str, help='''Name of the model trained with DINO you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether to only convert the base model (no projection head weights).''', ) parser.set_defaults(base_model=True) __a = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
337
from collections.abc import Callable class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ = None ): # Stores actual heap items. lowercase : list = [] # Stores indexes of each item for supporting updates and deletion. lowercase : dict = {} # Stores current size of heap. lowercase : str = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return int((i - 1) / 2 ) if i > 0 else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : Dict = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. lowercase , lowercase : int = self.arr[j], self.arr[i] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.arr[i][1] < self.arr[j][1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = self._left(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = i if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = left if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = right return valid_parent def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ ) while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) while valid_parent != index: self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : str = self.pos_map[item] lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : List[str] = self.pos_map[item] del self.pos_map[item] lowercase : Optional[int] = self.arr[self.size - 1] lowercase : int = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] ) else: lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )] lowercase : str = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowerCamelCase ( self ): return self.arr[0] if self.size else None def __lowerCamelCase ( self ): lowercase : str = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __lowercase ( ) ->None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
337
1
from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def is_in_circle(_UpperCamelCase, _UpperCamelCase ) -> bool: lowercase : List[str] = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle lowercase : List[str] = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(_UpperCamelCase ) ) # The ratio of the area for circle to square is pi/4. lowercase : str = proportion * 4 print(f"""The estimated value of pi is {pi_estimate}""" ) print(f"""The numpy value of pi is {pi}""" ) print(f"""The total error is {abs(pi - pi_estimate )}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = 0.0, _UpperCamelCase = 1.0, ) ->float: """simple docstring""" return mean( function_to_integrate(uniform(_UpperCamelCase, _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value) def __lowercase ( _UpperCamelCase, _UpperCamelCase = 0.0, _UpperCamelCase = 1.0 ) ->None: """simple docstring""" def identity_function(_UpperCamelCase ) -> float: return x lowercase : Union[str, Any] = area_under_curve_estimator( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : Tuple = (max_value * max_value - min_value * min_value) / 2 print('''******************''' ) print(f"""Estimating area under y=x where x varies from {min_value} to {max_value}""" ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {expected_value}""" ) print(f"""Total error is {abs(estimated_value - expected_value )}""" ) print('''******************''' ) def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" def function_to_integrate(_UpperCamelCase ) -> float: return sqrt(4.0 - x * x ) lowercase : int = area_under_curve_estimator( _UpperCamelCase, _UpperCamelCase, 0.0, 2.0 ) print('''******************''' ) print('''Estimating pi using area_under_curve_estimator''' ) print(f"""Estimated value is {estimated_value}""" ) print(f"""Expected value is {pi}""" ) print(f"""Total error is {abs(estimated_value - pi )}""" ) print('''******************''' ) if __name__ == "__main__": import doctest doctest.testmod()
337
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
337
1
from __future__ import annotations from collections import Counter from random import random class __SCREAMING_SNAKE_CASE : def __init__( self ): lowercase : int = {} def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Union[str, Any] = {} def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if nodea not in self.connections: self.add_node(SCREAMING_SNAKE_CASE__ ) if nodea not in self.connections: self.add_node(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = probability def __lowerCamelCase ( self ): return list(self.connections ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = 0 lowercase : Optional[int] = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->dict[str, int]: """simple docstring""" lowercase : Any = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) lowercase : Any = Counter(graph.get_nodes() ) lowercase : List[str] = start for _ in range(_UpperCamelCase ): lowercase : Optional[Any] = graph.transition(_UpperCamelCase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
337
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __a = logging.get_logger(__name__) __a = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __a = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } __a = {'''facebook/blenderbot-3B''': 1_28} class __SCREAMING_SNAKE_CASE ( A__ ): A : Dict = VOCAB_FILES_NAMES A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[int] = ['input_ids', 'attention_mask'] A : str = BlenderbotTokenizer def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ): super().__init__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) ) lowercase : str = add_prefix_space lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = add_prefix_space lowercase : str = '''post_processor''' lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if tokenizer_component_instance: lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Tuple = tuple(state['''sep'''] ) if "cls" in state: lowercase : Union[str, Any] = tuple(state['''cls'''] ) lowercase : Optional[int] = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : Any = add_prefix_space lowercase : Tuple = True if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets: lowercase : List[str] = trim_offsets lowercase : Optional[int] = True if changes_to_apply: lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) ) lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __lowerCamelCase ( self ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value lowercase : Any = value def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Tuple = [self.sep_token_id] lowercase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): return token_ids_a + [self.eos_token_id] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ ) lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length: lowercase : Tuple = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
337
1
from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig __a = logging.get_logger(__name__) __a = '''T5Config''' class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[Any] = 'mt5' A : str = MTaConfig class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[int] = 'mt5' A : str = MTaConfig class __SCREAMING_SNAKE_CASE ( A__ ): A : Tuple = 'mt5' A : Dict = MTaConfig
337
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowercase ( ) ->int: """simple docstring""" lowercase : Tuple = HfArgumentParser(_UpperCamelCase ) lowercase : List[str] = parser.parse_args_into_dataclasses()[0] lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase ) try: lowercase : Any = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] ) lowercase : Any = '''''' lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] ) lowercase : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase ) raise ValueError(_UpperCamelCase ) benchmark.run() if __name__ == "__main__": main()
337
1
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate __a = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) __a = [] __a = [] __a = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}} __a = [ { '''type''': '''header''', '''text''': { '''type''': '''plain_text''', '''text''': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''', '''emoji''': True, }, } ] __a = 0 for log in Path().glob('''*.log'''): __a = 0 with open(log, '''r''') as f: for line in f: __a = json.loads(line) if line.get('''nodeid''', '''''') != "": __a = line['''nodeid'''] if line.get('''duration''', None) is not None: __a = F'''{line["duration"]:.4f}''' if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) __a = [] log.unlink() __a = '''''' __a = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" __a = [] __a = {} for test in failed_tests: __a = test[0].split('''::''') __a = data[0].split('''/''')[-1] if data[0] not in filesafailed: __a = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) __a = [test[0] for test in failed_table] __a = list(set(files)) # Count number of instances in failed_tests __a = [] for file in individual_files: table.append([file, len(filesafailed[file])]) __a = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 30_00: __a = '''Too many failed tests, please see the full report in the Action results.''' __a = len(err) + 10 __a = message[: 30_00 - offset] + F'''\n...\n```\n{err}''' print(F'''### {message}''') else: __a = '''No failed tests! 🤗''' print(F'''## {message}''') payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient __a = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 🤗": __a = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': message, }, } payload.append(md_report) __a = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': '''*For more details:*''', }, '''accessory''': { '''type''': '''button''', '''text''': { '''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True, }, '''url''': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''', }, } payload.append(action_button) __a = { '''type''': '''context''', '''elements''': [ { '''type''': '''plain_text''', '''text''': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''', } ], } payload.append(date_report) __a = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) __a = response.data['''ts'''] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name __a = '''''' for i, row in enumerate(test_failures): if row[0] != test_class: __a = row[0] else: __a = '''''' __a = { '''type''': '''section''', '''text''': { '''type''': '''mrkdwn''', '''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''', }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
337
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [False] * len(_UpperCamelCase ) lowercase : Optional[int] = [] queue.append(_UpperCamelCase ) lowercase : Union[str, Any] = True while queue: lowercase : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) lowercase : Tuple = True lowercase : Optional[Any] = u return visited[t] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : List[str] = [-1] * (len(_UpperCamelCase )) lowercase : int = 0 while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : List[str] = float('''Inf''' ) lowercase : int = sink while s != source: # Find the minimum value in select path lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] ) lowercase : Union[str, Any] = parent[s] max_flow += path_flow lowercase : Optional[int] = sink while v != source: lowercase : Any = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Union[str, Any] = parent[v] return max_flow __a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __a , __a = 0, 5 print(ford_fulkerson(graph, source, sink))
337
1
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def __lowercase ( _UpperCamelCase ) ->Tuple: """simple docstring""" lowercase : List[str] = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:] ) class __SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): A : int = StableDiffusionLatentUpscalePipeline A : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { 'height', 'width', 'cross_attention_kwargs', 'negative_prompt_embeds', 'prompt_embeds', } A : int = PipelineTesterMixin.required_optional_params - {'num_images_per_prompt'} A : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS A : List[str] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess A : Union[str, Any] = frozenset([] ) A : List[str] = True @property def __lowerCamelCase ( self ): lowercase : Optional[Any] = 1 lowercase : List[str] = 4 lowercase : List[Any] = (16, 16) lowercase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) return image def __lowerCamelCase ( self ): torch.manual_seed(0 ) lowercase : Dict = UNetaDConditionModel( act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=SCREAMING_SNAKE_CASE__ , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=( '''KDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', '''KCrossAttnDownBlock2D''', ) , in_channels=8 , mid_block_type=SCREAMING_SNAKE_CASE__ , only_cross_attention=SCREAMING_SNAKE_CASE__ , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , ) lowercase : List[str] = AutoencoderKL( block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[ '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D''', ] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) lowercase : List[Any] = EulerDiscreteScheduler(prediction_type='''sample''' ) lowercase : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , ) lowercase : List[str] = CLIPTextModel(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase : Any = { '''unet''': model.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ): if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): lowercase : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: lowercase : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': self.dummy_image.cpu(), '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def __lowerCamelCase ( self ): lowercase : Any = '''cpu''' lowercase : Any = self.get_dummy_components() lowercase : Tuple = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) lowercase : Dict = pipe(**SCREAMING_SNAKE_CASE__ ).images lowercase : List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 256, 256, 3) ) lowercase : Tuple = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) lowercase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1E-3 ) def __lowerCamelCase ( self ): super().test_attention_slicing_forward_pass(expected_max_diff=7E-3 ) def __lowerCamelCase ( self ): super().test_cpu_offload_forward_pass(expected_max_diff=3E-3 ) def __lowerCamelCase ( self ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCamelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=7E-3 ) def __lowerCamelCase ( self ): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3E-3 ) def __lowerCamelCase ( self ): super().test_save_load_local(expected_max_difference=3E-3 ) def __lowerCamelCase ( self ): super().test_save_load_optional_components(expected_max_difference=3E-3 ) def __lowerCamelCase ( self ): lowercase : List[str] = [ '''DDIMScheduler''', '''DDPMScheduler''', '''PNDMScheduler''', '''HeunDiscreteScheduler''', '''EulerAncestralDiscreteScheduler''', '''KDPM2DiscreteScheduler''', '''KDPM2AncestralDiscreteScheduler''', '''DPMSolverSDEScheduler''', ] lowercase : Dict = self.get_dummy_components() lowercase : Optional[Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ ) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) lowercase : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) lowercase : int = 2 lowercase : int = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , scheduler_enum.name ) lowercase : Any = scheduler_cls.from_config(pipe.scheduler.config ) lowercase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__ )[0] outputs.append(SCREAMING_SNAKE_CASE__ ) assert check_same_shape(SCREAMING_SNAKE_CASE__ ) @require_torch_gpu @slow class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): lowercase : Any = torch.manual_seed(33 ) lowercase : str = StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) lowercase : Any = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) lowercase : List[Any] = '''a photo of an astronaut high resolution, unreal engine, ultra realistic''' lowercase : Dict = pipe(SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , output_type='''latent''' ).images lowercase : List[Any] = upscaler( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , guidance_scale=0 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ).images[0] lowercase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' ) assert np.abs((expected_image - image).mean() ) < 5E-2 def __lowerCamelCase ( self ): lowercase : int = torch.manual_seed(33 ) lowercase : str = StableDiffusionLatentUpscalePipeline.from_pretrained( '''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa ) upscaler.to('''cuda''' ) lowercase : Optional[int] = '''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas''' lowercase : int = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' ) lowercase : str = upscaler( prompt=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , guidance_scale=0 , generator=SCREAMING_SNAKE_CASE__ , output_type='''np''' , ).images[0] lowercase : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-2
337
from typing import List from .keymap import KEYMAP, get_character def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += [key] setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator def __lowercase ( *_UpperCamelCase ) ->Any: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += keys setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator class __SCREAMING_SNAKE_CASE ( A__ ): def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ): setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} ) setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] ) for key in handled_keys: lowercase : List[Any] = value return new_cls @staticmethod def __lowerCamelCase ( cls ): lowercase : Dict = get_character() if char != KEYMAP["undefined"]: lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ ) if handler: lowercase : Tuple = char return handler(cls ) else: return None def __lowercase ( cls ) ->Any: """simple docstring""" return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
337
1
from __future__ import annotations import math def __lowercase ( _UpperCamelCase ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(_UpperCamelCase ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True __a = [num for num in range(3, 10_00_01, 2) if not is_prime(num)] def __lowercase ( _UpperCamelCase ) ->list[int]: """simple docstring""" if not isinstance(_UpperCamelCase, _UpperCamelCase ): raise ValueError('''n must be an integer''' ) if n <= 0: raise ValueError('''n must be >= 0''' ) lowercase : str = [] for num in range(len(_UpperCamelCase ) ): lowercase : Optional[Any] = 0 while 2 * i * i <= odd_composites[num]: lowercase : List[str] = odd_composites[num] - 2 * i * i if is_prime(_UpperCamelCase ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(_UpperCamelCase ) == n: return list_nums return [] def __lowercase ( ) ->int: """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(F'''{solution() = }''')
337
import logging import os from .state import PartialState class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ): if self._should_log(SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) elif in_order: lowercase : List[Any] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) state.wait_for_everyone() def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]: """simple docstring""" if log_level is None: lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase ) lowercase : str = logging.getLogger(_UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_UpperCamelCase, {} )
337
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { '''configuration_rembert''': ['''REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RemBertConfig''', '''RemBertOnnxConfig'''] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['''RemBertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['''RemBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RemBertForCausalLM''', '''RemBertForMaskedLM''', '''RemBertForMultipleChoice''', '''RemBertForQuestionAnswering''', '''RemBertForSequenceClassification''', '''RemBertForTokenClassification''', '''RemBertLayer''', '''RemBertModel''', '''RemBertPreTrainedModel''', '''load_tf_weights_in_rembert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRemBertForCausalLM''', '''TFRemBertForMaskedLM''', '''TFRemBertForMultipleChoice''', '''TFRemBertForQuestionAnswering''', '''TFRemBertForSequenceClassification''', '''TFRemBertForTokenClassification''', '''TFRemBertLayer''', '''TFRemBertModel''', '''TFRemBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class __SCREAMING_SNAKE_CASE ( pl.LightningModule ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Any = model lowercase : Optional[Any] = 2 lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __lowerCamelCase ( self ): pass def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase ) lowercase : int = LightningModel(_UpperCamelCase ) lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
337
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['''ReformerTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['''ReformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ReformerAttention''', '''ReformerForMaskedLM''', '''ReformerForQuestionAnswering''', '''ReformerForSequenceClassification''', '''ReformerLayer''', '''ReformerModel''', '''ReformerModelWithLMHead''', '''ReformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __SCREAMING_SNAKE_CASE ( A__ ): A : Any = 'yolos' def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = hidden_size lowercase : int = num_hidden_layers lowercase : str = num_attention_heads lowercase : str = intermediate_size lowercase : Dict = hidden_act lowercase : int = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[Any] = initializer_range lowercase : Optional[int] = layer_norm_eps lowercase : str = image_size lowercase : Dict = patch_size lowercase : str = num_channels lowercase : Optional[int] = qkv_bias lowercase : List[str] = num_detection_tokens lowercase : List[str] = use_mid_position_embeddings lowercase : Dict = auxiliary_loss # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : Any = bbox_cost lowercase : int = giou_cost # Loss coefficients lowercase : Dict = bbox_loss_coefficient lowercase : Optional[Any] = giou_loss_coefficient lowercase : Tuple = eos_coefficient class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return 12
337
1
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __a = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] __a = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] __a = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) __a = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) __a = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" for tf_name, hf_name in patterns: lowercase : Tuple = k.replace(_UpperCamelCase, _UpperCamelCase ) return k def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->BigBirdPegasusForConditionalGeneration: """simple docstring""" lowercase : List[Any] = BigBirdPegasusConfig(**_UpperCamelCase ) lowercase : Optional[Any] = BigBirdPegasusForConditionalGeneration(_UpperCamelCase ) lowercase : List[Any] = torch_model.state_dict() lowercase : List[Any] = {} # separating decoder weights lowercase : List[str] = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} lowercase : List[Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items(), '''tf -> hf conversion''' ): lowercase : Optional[Any] = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue lowercase : List[Any] = DECODER_PATTERNS lowercase : int = rename_state_dict_key(_UpperCamelCase, _UpperCamelCase ) if new_k not in state_dict: raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): lowercase : List[str] = v.T lowercase : List[Any] = torch.from_numpy(_UpperCamelCase ) assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items(), '''tf -> hf conversion''' ): lowercase : Union[str, Any] = [k.endswith(_UpperCamelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCamelCase ): continue lowercase : List[str] = REMAINING_PATTERNS lowercase : Dict = rename_state_dict_key(_UpperCamelCase, _UpperCamelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): lowercase : Dict = v.T lowercase : str = torch.from_numpy(_UpperCamelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" lowercase : List[str] = mapping['''model.embed_positions.weight'''] lowercase : Tuple = mapping.pop('''model.embed_positions.weight''' ) lowercase , lowercase : Optional[Any] = torch_model.load_state_dict(_UpperCamelCase, strict=_UpperCamelCase ) lowercase : List[Any] = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], f"""no matches found for the following tf keys {extra}""" return torch_model def __lowercase ( _UpperCamelCase ) ->Dict: """simple docstring""" lowercase : Optional[Any] = tf.train.list_variables(_UpperCamelCase ) lowercase : Any = {} lowercase : Optional[int] = ['''global_step'''] for name, shape in tqdm(_UpperCamelCase, desc='''converting tf checkpoint to dict''' ): lowercase : List[str] = any(pat in name for pat in ignore_name ) if skip_key: continue lowercase : List[Any] = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = array return tf_weights def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Any: """simple docstring""" lowercase : Optional[Any] = get_tf_weights_as_numpy(_UpperCamelCase ) lowercase : Tuple = convert_bigbird_pegasus(_UpperCamelCase, _UpperCamelCase ) torch_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') __a = parser.parse_args() __a = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
337
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __a = { '''<''': operator.lt, '''<=''': operator.le, '''==''': operator.eq, '''!=''': operator.ne, '''>=''': operator.ge, '''>''': operator.gt, } def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider""" f""" reinstalling {pkg}.""" ) if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ): raise ImportError( f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None: """simple docstring""" lowercase : List[Any] = f"""\n{hint}""" if hint is not None else '''''' # non-versioned check if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ): lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None else: lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f""" got {requirement}""" ) lowercase , lowercase : str = match[0] lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements lowercase : List[Any] = {} for w in want_range: lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f""" but got {requirement}""" ) lowercase , lowercase : Optional[int] = match[0] lowercase : Dict = want_ver if op not in ops: raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" ) # special case if pkg == "python": lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return # check if any version is installed try: lowercase : List[str] = importlib.metadata.version(_UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(_UpperCamelCase, _UpperCamelCase )
337
1
from __future__ import annotations import math import random from typing import Any class __SCREAMING_SNAKE_CASE : def __init__( self ): lowercase : list[Any] = [] lowercase : int = 0 lowercase : int = 0 def __lowerCamelCase ( self ): return self.head == self.tail def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): self.data.append(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = self.tail + 1 def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.data[self.head] lowercase : Optional[int] = self.head + 1 return ret def __lowerCamelCase ( self ): return self.tail - self.head def __lowerCamelCase ( self ): print(self.data ) print('''**************''' ) print(self.data[self.head : self.tail] ) class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = data lowercase : MyNode | None = None lowercase : MyNode | None = None lowercase : int = 1 def __lowerCamelCase ( self ): return self.data def __lowerCamelCase ( self ): return self.left def __lowerCamelCase ( self ): return self.right def __lowerCamelCase ( self ): return self.height def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = data def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Union[str, Any] = node def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = node def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = height def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" if node is None: return 0 return node.get_height() def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" if a > b: return a return b def __lowercase ( _UpperCamelCase ) ->MyNode: """simple docstring""" print('''left rotation node:''', node.get_data() ) lowercase : List[str] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(_UpperCamelCase ) lowercase : Union[str, Any] = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1 node.set_height(_UpperCamelCase ) lowercase : Optional[int] = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1 ret.set_height(_UpperCamelCase ) return ret def __lowercase ( _UpperCamelCase ) ->MyNode: """simple docstring""" print('''right rotation node:''', node.get_data() ) lowercase : Union[str, Any] = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(_UpperCamelCase ) lowercase : Union[str, Any] = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1 node.set_height(_UpperCamelCase ) lowercase : Dict = my_max(get_height(ret.get_right() ), get_height(ret.get_left() ) ) + 1 ret.set_height(_UpperCamelCase ) return ret def __lowercase ( _UpperCamelCase ) ->MyNode: """simple docstring""" lowercase : Any = node.get_left() assert left_child is not None node.set_left(left_rotation(_UpperCamelCase ) ) return right_rotation(_UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->MyNode: """simple docstring""" lowercase : List[Any] = node.get_right() assert right_child is not None node.set_right(right_rotation(_UpperCamelCase ) ) return left_rotation(_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->MyNode | None: """simple docstring""" if node is None: return MyNode(_UpperCamelCase ) if data < node.get_data(): node.set_left(insert_node(node.get_left(), _UpperCamelCase ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected lowercase : Optional[Any] = node.get_left() assert left_child is not None if ( data < left_child.get_data() ): # new node is the left child of the left child lowercase : Tuple = right_rotation(_UpperCamelCase ) else: lowercase : Optional[Any] = lr_rotation(_UpperCamelCase ) else: node.set_right(insert_node(node.get_right(), _UpperCamelCase ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: lowercase : str = node.get_right() assert right_child is not None if data < right_child.get_data(): lowercase : Tuple = rl_rotation(_UpperCamelCase ) else: lowercase : Optional[int] = left_rotation(_UpperCamelCase ) lowercase : Optional[Any] = my_max(get_height(node.get_right() ), get_height(node.get_left() ) ) + 1 node.set_height(_UpperCamelCase ) return node def __lowercase ( _UpperCamelCase ) ->Any: """simple docstring""" while True: lowercase : Union[str, Any] = root.get_right() if right_child is None: break lowercase : str = right_child return root.get_data() def __lowercase ( _UpperCamelCase ) ->Any: """simple docstring""" while True: lowercase : str = root.get_left() if left_child is None: break lowercase : Optional[Any] = left_child return root.get_data() def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->MyNode | None: """simple docstring""" lowercase : str = root.get_left() lowercase : str = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: lowercase : Union[str, Any] = get_left_most(_UpperCamelCase ) root.set_data(_UpperCamelCase ) root.set_right(del_node(_UpperCamelCase, _UpperCamelCase ) ) elif left_child is not None: lowercase : int = left_child elif right_child is not None: lowercase : Tuple = right_child else: return None elif root.get_data() > data: if left_child is None: print('''No such data''' ) return root else: root.set_left(del_node(_UpperCamelCase, _UpperCamelCase ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_UpperCamelCase, _UpperCamelCase ) ) if get_height(_UpperCamelCase ) - get_height(_UpperCamelCase ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): lowercase : List[str] = left_rotation(_UpperCamelCase ) else: lowercase : int = rl_rotation(_UpperCamelCase ) elif get_height(_UpperCamelCase ) - get_height(_UpperCamelCase ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): lowercase : List[Any] = right_rotation(_UpperCamelCase ) else: lowercase : List[Any] = lr_rotation(_UpperCamelCase ) lowercase : Any = my_max(get_height(root.get_right() ), get_height(root.get_left() ) ) + 1 root.set_height(_UpperCamelCase ) return root class __SCREAMING_SNAKE_CASE : def __init__( self ): lowercase : MyNode | None = None def __lowerCamelCase ( self ): return get_height(self.root ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): print('''insert:''' + str(SCREAMING_SNAKE_CASE__ ) ) lowercase : Optional[Any] = insert_node(self.root , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): print('''delete:''' + str(SCREAMING_SNAKE_CASE__ ) ) if self.root is None: print('''Tree is empty!''' ) return lowercase : Dict = del_node(self.root , SCREAMING_SNAKE_CASE__ ) def __str__( self , ): # a level traversale, gives a more intuitive look on the tree lowercase : List[Any] = '''''' lowercase : Optional[Any] = MyQueue() q.push(self.root ) lowercase : int = self.get_height() if layer == 0: return output lowercase : List[str] = 0 while not q.is_empty(): lowercase : Tuple = q.pop() lowercase : Optional[int] = ''' ''' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(SCREAMING_SNAKE_CASE__ ) q.push(SCREAMING_SNAKE_CASE__ ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space lowercase : Tuple = cnt + 1 for i in range(100 ): if cnt == math.pow(2 , SCREAMING_SNAKE_CASE__ ) - 1: lowercase : int = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def __lowercase ( ) ->None: """simple docstring""" import doctest doctest.testmod() if __name__ == "__main__": _test() __a = AVLtree() __a = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
337
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a = logging.get_logger(__name__) __a = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'deta' A : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = backbone_config.pop('''model_type''' ) lowercase : Any = CONFIG_MAPPING[backbone_model_type] lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = backbone_config lowercase : Union[str, Any] = num_queries lowercase : Any = max_position_embeddings lowercase : int = d_model lowercase : Any = encoder_ffn_dim lowercase : Optional[int] = encoder_layers lowercase : Tuple = encoder_attention_heads lowercase : Optional[Any] = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : int = decoder_attention_heads lowercase : Any = dropout lowercase : int = attention_dropout lowercase : Dict = activation_dropout lowercase : int = activation_function lowercase : Dict = init_std lowercase : List[str] = init_xavier_std lowercase : Optional[Any] = encoder_layerdrop lowercase : Tuple = auxiliary_loss lowercase : Tuple = position_embedding_type # deformable attributes lowercase : List[str] = num_feature_levels lowercase : Tuple = encoder_n_points lowercase : Optional[int] = decoder_n_points lowercase : Tuple = two_stage lowercase : Optional[Any] = two_stage_num_proposals lowercase : Union[str, Any] = with_box_refine lowercase : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : str = bbox_cost lowercase : List[Any] = giou_cost # Loss coefficients lowercase : Tuple = mask_loss_coefficient lowercase : Any = dice_loss_coefficient lowercase : Dict = bbox_loss_coefficient lowercase : Tuple = giou_loss_coefficient lowercase : Union[str, Any] = eos_coefficient lowercase : Tuple = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return self.encoder_attention_heads @property def __lowerCamelCase ( self ): return self.d_model def __lowerCamelCase ( self ): lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Any = self.backbone_config.to_dict() lowercase : List[str] = self.__class__.model_type return output
337
1
from torch import nn def __lowercase ( _UpperCamelCase ) ->Any: """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f"""Unsupported activation function: {act_fn}""" )
337
def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : Union[str, Any] = 0 for i in range(1, 1001 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
337
1
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __a = logging.getLogger(__name__) @dataclass class __SCREAMING_SNAKE_CASE : A : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) A : Optional[str] = field( default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) A : bool = field(default=A__ , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. A : Optional[str] = field( default=A__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class __SCREAMING_SNAKE_CASE : A : str = field( metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} ) A : Optional[str] = field( default=A__ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , ) A : int = field( default=128 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) A : bool = field( default=A__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def __lowercase ( ) ->str: """simple docstring""" lowercase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase , lowercase , lowercase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase , lowercase , lowercase : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) lowercase : List[Any] = import_module('''tasks''' ) try: lowercase : Tuple = getattr(_UpperCamelCase, model_args.task_type ) lowercase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1 ), training_args.fpaa, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''', _UpperCamelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowercase : Optional[Any] = token_classification_task.get_labels(data_args.labels ) lowercase : Dict[int, str] = dict(enumerate(_UpperCamelCase ) ) lowercase : int = len(_UpperCamelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=_UpperCamelCase, idalabel=_UpperCamelCase, labelaid={label: i for i, label in enumerate(_UpperCamelCase )}, cache_dir=model_args.cache_dir, ) lowercase : List[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, ) lowercase : int = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=_UpperCamelCase, cache_dir=model_args.cache_dir, ) # Get datasets lowercase : List[Any] = ( TokenClassificationDataset( token_classification_task=_UpperCamelCase, data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, labels=_UpperCamelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) lowercase : Optional[Any] = ( TokenClassificationDataset( token_classification_task=_UpperCamelCase, data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, labels=_UpperCamelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def align_predictions(_UpperCamelCase, _UpperCamelCase ) -> Tuple[List[int], List[int]]: lowercase : List[Any] = np.argmax(_UpperCamelCase, axis=2 ) lowercase , lowercase : Union[str, Any] = preds.shape lowercase : Optional[Any] = [[] for _ in range(_UpperCamelCase )] lowercase : List[str] = [[] for _ in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_UpperCamelCase ) -> Dict: lowercase , lowercase : Union[str, Any] = align_predictions(p.predictions, p.label_ids ) return { "accuracy_score": accuracy_score(_UpperCamelCase, _UpperCamelCase ), "precision": precision_score(_UpperCamelCase, _UpperCamelCase ), "recall": recall_score(_UpperCamelCase, _UpperCamelCase ), "f1": fa_score(_UpperCamelCase, _UpperCamelCase ), } # Data collator lowercase : List[str] = DataCollatorWithPadding(_UpperCamelCase, pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase : Dict = Trainer( model=_UpperCamelCase, args=_UpperCamelCase, train_dataset=_UpperCamelCase, eval_dataset=_UpperCamelCase, compute_metrics=_UpperCamelCase, data_collator=_UpperCamelCase, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase : Tuple = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) lowercase : Dict = trainer.evaluate() lowercase : Optional[int] = os.path.join(training_args.output_dir, '''eval_results.txt''' ) if trainer.is_world_process_zero(): with open(_UpperCamelCase, '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''', _UpperCamelCase, _UpperCamelCase ) writer.write('''%s = %s\n''' % (key, value) ) results.update(_UpperCamelCase ) # Predict if training_args.do_predict: lowercase : Tuple = TokenClassificationDataset( token_classification_task=_UpperCamelCase, data_dir=data_args.data_dir, tokenizer=_UpperCamelCase, labels=_UpperCamelCase, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, ) lowercase , lowercase , lowercase : Dict = trainer.predict(_UpperCamelCase ) lowercase , lowercase : List[str] = align_predictions(_UpperCamelCase, _UpperCamelCase ) lowercase : List[Any] = os.path.join(training_args.output_dir, '''test_results.txt''' ) if trainer.is_world_process_zero(): with open(_UpperCamelCase, '''w''' ) as writer: for key, value in metrics.items(): logger.info(''' %s = %s''', _UpperCamelCase, _UpperCamelCase ) writer.write('''%s = %s\n''' % (key, value) ) # Save predictions lowercase : Optional[int] = os.path.join(training_args.output_dir, '''test_predictions.txt''' ) if trainer.is_world_process_zero(): with open(_UpperCamelCase, '''w''' ) as writer: with open(os.path.join(data_args.data_dir, '''test.txt''' ), '''r''' ) as f: token_classification_task.write_predictions_to_file(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return results def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" main() if __name__ == "__main__": main()
337
import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __a = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) lowercase : Any = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCamelCase ( self ): lowercase : List[Any] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with a really long name lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
337
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __a = { '''configuration_blenderbot_small''': [ '''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotSmallConfig''', '''BlenderbotSmallOnnxConfig''', ], '''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['''BlenderbotSmallTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotSmallForCausalLM''', '''BlenderbotSmallForConditionalGeneration''', '''BlenderbotSmallModel''', '''BlenderbotSmallPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TFBlenderbotSmallForConditionalGeneration''', '''TFBlenderbotSmallModel''', '''TFBlenderbotSmallPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''FlaxBlenderbotSmallForConditionalGeneration''', '''FlaxBlenderbotSmallModel''', '''FlaxBlenderbotSmallPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
import math class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1 lowercase : List[Any] = n lowercase : List[Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # adjacency matrix for weight lowercase : Union[str, Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # dp[i][j] stores minimum distance from i to j def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = w def __lowerCamelCase ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
337
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''microsoft/unispeech-sat-base-100h-libri-ft''': ( '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json''' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[str, Any] = 'unispeech-sat' def __init__( self , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-5 , SCREAMING_SNAKE_CASE__="group" , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=(512, 512, 512, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE__=(5, 2, 2, 2, 2, 2, 2) , SCREAMING_SNAKE_CASE__=(10, 3, 3, 3, 3, 2, 2) , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=128 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=0.05 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=10 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=320 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__="mean" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=(512, 512, 512, 512, 1500) , SCREAMING_SNAKE_CASE__=(5, 3, 3, 1, 1) , SCREAMING_SNAKE_CASE__=(1, 2, 3, 1, 1) , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=504 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = hidden_size lowercase : Optional[int] = feat_extract_norm lowercase : Dict = feat_extract_activation lowercase : int = list(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = list(SCREAMING_SNAKE_CASE__ ) lowercase : Dict = list(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = conv_bias lowercase : Any = num_conv_pos_embeddings lowercase : Any = num_conv_pos_embedding_groups lowercase : int = len(self.conv_dim ) lowercase : Union[str, Any] = num_hidden_layers lowercase : int = intermediate_size lowercase : Optional[int] = hidden_act lowercase : List[str] = num_attention_heads lowercase : Any = hidden_dropout lowercase : List[Any] = attention_dropout lowercase : Any = activation_dropout lowercase : int = feat_proj_dropout lowercase : str = final_dropout lowercase : Optional[Any] = layerdrop lowercase : Optional[Any] = layer_norm_eps lowercase : Union[str, Any] = initializer_range lowercase : List[str] = vocab_size lowercase : List[Any] = num_clusters lowercase : Union[str, Any] = do_stable_layer_norm lowercase : List[str] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,""" f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase : Union[str, Any] = apply_spec_augment lowercase : Optional[int] = mask_time_prob lowercase : Any = mask_time_length lowercase : Dict = mask_time_min_masks lowercase : List[str] = mask_feature_prob lowercase : List[str] = mask_feature_length lowercase : Union[str, Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowercase : List[str] = num_codevectors_per_group lowercase : str = num_codevector_groups lowercase : List[Any] = contrastive_logits_temperature lowercase : Union[str, Any] = feat_quantizer_dropout lowercase : str = num_negatives lowercase : Dict = codevector_dim lowercase : Tuple = proj_codevector_dim lowercase : Optional[int] = diversity_loss_weight # ctc loss lowercase : Any = ctc_loss_reduction lowercase : Any = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowercase : Dict = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowercase : Optional[Any] = list(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = list(SCREAMING_SNAKE_CASE__ ) lowercase : int = list(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = xvector_output_dim @property def __lowerCamelCase ( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
337
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(_UpperCamelCase ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
1
from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Any = prime_factors(_UpperCamelCase ) if is_square_free(_UpperCamelCase ): return -1 if len(_UpperCamelCase ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
337
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
337
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __a = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): A : Tuple = DebertaVaTokenizer A : int = DebertaVaTokenizerFast A : Optional[Any] = True A : List[str] = True def __lowerCamelCase ( self ): super().setUp() # We have a SentencePiece fixture for testing lowercase : Optional[Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , unk_token='''<unk>''' ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = '''this is a test''' lowercase : str = '''this is a test''' return input_text, output_text def __lowerCamelCase ( self ): lowercase : List[Any] = '''<pad>''' lowercase : Dict = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''<unk>''' ) self.assertEqual(vocab_keys[-1] , '''[PAD]''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 30001 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def __lowerCamelCase ( self ): # fmt: off lowercase : List[str] = ''' \tHeLLo!how \n Are yoU? ''' lowercase : Optional[int] = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on lowercase : Tuple = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : str = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def __lowerCamelCase ( self ): pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): # fmt: off lowercase : List[str] = '''I was born in 92000, and this is falsé.''' lowercase : Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase : Optional[int] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # fmt: off lowercase : Optional[Any] = '''I was born in 92000, and this is falsé.''' lowercase : int = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase : List[Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # fmt: off lowercase : List[str] = '''I was born in 92000, and this is falsé.''' lowercase : Tuple = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase : Optional[Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # fmt: off lowercase : Dict = '''I was born in 92000, and this is falsé.''' lowercase : Tuple = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on lowercase : Optional[int] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # fmt: off lowercase : Any = ''' \tHeLLo!how \n Are yoU? ''' lowercase : Union[str, Any] = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on lowercase : Any = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Dict = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , split_by_punct=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.get_tokenizer() lowercase : int = self.get_rust_tokenizer() lowercase : Tuple = '''I was born in 92000, and this is falsé.''' lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = self.get_rust_tokenizer() lowercase : str = tokenizer.encode(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Any = '''This is a test''' lowercase : Dict = [13, 1, 4398, 25, 21, 1289] lowercase : Optional[Any] = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase : str = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] lowercase : Union[str, Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Dict = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : int = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) # fmt: off lowercase : Optional[int] = '''I was born in 92000, and this is falsé.''' lowercase : str = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] lowercase : List[str] = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] lowercase : Dict = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on lowercase : Union[str, Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Any = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Any = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = tokenizer.encode('''sequence builders''' ) lowercase : str = tokenizer.encode('''multi-sequence build''' ) lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ) lowercase : str = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , SCREAMING_SNAKE_CASE__ ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , SCREAMING_SNAKE_CASE__ , ) @slow def __lowerCamelCase ( self ): # fmt: off lowercase : str = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
337
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase ) ->List[int]: """simple docstring""" if isinstance(_UpperCamelCase, np.ndarray ): return list(tensor.shape ) lowercase : Optional[Any] = tf.shape(_UpperCamelCase ) if tensor.shape == tf.TensorShape(_UpperCamelCase ): return dynamic lowercase : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )] def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase : int = [1] * inputs.shape.rank lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis] lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase : List[str] = tf.nn.batch_normalization( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, ) return outputs def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase : Dict = tf.shape(_UpperCamelCase ) lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->tf.Tensor: """simple docstring""" if not isinstance(_UpperCamelCase, tf.Tensor ): lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase : str = ( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None: """simple docstring""" tf.debugging.assert_less( _UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ), ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowercase : Any = np.asarray(_UpperCamelCase ) lowercase : List[Any] = 1 lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCamelCase ): lowercase : Optional[int] = chunk_data else: lowercase : int = data def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" if name in group.attrs: lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]] else: lowercase : Optional[Any] = [] lowercase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" def _expand_single_ad_tensor(_UpperCamelCase ): if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCamelCase, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
337
1
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort __a = logging.get_logger(__name__) __a = { '''tensor(bool)''': np.bool_, '''tensor(int8)''': np.inta, '''tensor(uint8)''': np.uinta, '''tensor(int16)''': np.intaa, '''tensor(uint16)''': np.uintaa, '''tensor(int32)''': np.intaa, '''tensor(uint32)''': np.uintaa, '''tensor(int64)''': np.intaa, '''tensor(uint64)''': np.uintaa, '''tensor(float16)''': np.floataa, '''tensor(float)''': np.floataa, '''tensor(double)''': np.floataa, } class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ ): logger.info('''`diffusers.OnnxRuntimeModel` is experimental and might change in the future.''' ) lowercase : int = model lowercase : List[str] = kwargs.get('''model_save_dir''' , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = kwargs.get('''latest_model_name''' , SCREAMING_SNAKE_CASE__ ) def __call__( self , **SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = {k: np.array(SCREAMING_SNAKE_CASE__ ) for k, v in kwargs.items()} return self.model.run(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None ): if provider is None: logger.info('''No onnxruntime provider specified, using CPUExecutionProvider''' ) lowercase : Optional[Any] = '''CPUExecutionProvider''' return ort.InferenceSession(SCREAMING_SNAKE_CASE__ , providers=[provider] , sess_options=SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ ): lowercase : Dict = file_name if file_name is not None else ONNX_WEIGHTS_NAME lowercase : Union[str, Any] = self.model_save_dir.joinpath(self.latest_model_name ) lowercase : Any = Path(SCREAMING_SNAKE_CASE__ ).joinpath(SCREAMING_SNAKE_CASE__ ) try: shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) lowercase : int = self.model_save_dir.joinpath(SCREAMING_SNAKE_CASE__ ) if src_path.exists(): lowercase : int = Path(SCREAMING_SNAKE_CASE__ ).joinpath(SCREAMING_SNAKE_CASE__ ) try: shutil.copyfile(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except shutil.SameFileError: pass def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ): if os.path.isfile(SCREAMING_SNAKE_CASE__ ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) # saving model weights/files self._save_pretrained(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @classmethod def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): lowercase : Any = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = OnnxRuntimeModel.load_model( os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , provider=SCREAMING_SNAKE_CASE__ , sess_options=SCREAMING_SNAKE_CASE__ ) lowercase : int = Path(SCREAMING_SNAKE_CASE__ ) # load model from hub else: # download model lowercase : int = hf_hub_download( repo_id=SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , ) lowercase : str = Path(SCREAMING_SNAKE_CASE__ ).parent lowercase : Optional[int] = Path(SCREAMING_SNAKE_CASE__ ).name lowercase : Dict = OnnxRuntimeModel.load_model(SCREAMING_SNAKE_CASE__ , provider=SCREAMING_SNAKE_CASE__ , sess_options=SCREAMING_SNAKE_CASE__ ) return cls(model=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @classmethod def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ): lowercase : int = None if len(str(SCREAMING_SNAKE_CASE__ ).split('''@''' ) ) == 2: lowercase , lowercase : List[Any] = model_id.split('''@''' ) return cls._from_pretrained( model_id=SCREAMING_SNAKE_CASE__ , revision=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
337
def __lowercase ( _UpperCamelCase = 4000000 ) ->int: """simple docstring""" lowercase : int = [] lowercase , lowercase : str = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCamelCase ) lowercase , lowercase : Dict = b, a + b return sum(_UpperCamelCase ) if __name__ == "__main__": print(F'''{solution() = }''')
337
1
import os def __lowercase ( _UpperCamelCase = "input.txt" ) ->int: """simple docstring""" with open(os.path.join(os.path.dirname(_UpperCamelCase ), _UpperCamelCase ) ) as input_file: lowercase : List[str] = [ [int(_UpperCamelCase ) for element in line.split(''',''' )] for line in input_file.readlines() ] lowercase : List[str] = len(_UpperCamelCase ) lowercase : List[str] = len(matrix[0] ) lowercase : List[str] = [[-1 for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): lowercase : Optional[int] = matrix[i][0] for j in range(1, _UpperCamelCase ): for i in range(_UpperCamelCase ): lowercase : Dict = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1, _UpperCamelCase ): lowercase : Optional[int] = min( minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2, -1, -1 ): lowercase : int = min( minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
337
from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __a = logging.get_logger(__name__) __a = { '''deepmind/language-perceiver''': '''https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json''', # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'perceiver' def __init__( self , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=1280 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=26 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="kv" , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=262 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=56 , SCREAMING_SNAKE_CASE__=[368, 496] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=1920 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=[1, 16, 224, 224] , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Any = num_latents lowercase : Union[str, Any] = d_latents lowercase : str = d_model lowercase : int = num_blocks lowercase : str = num_self_attends_per_block lowercase : List[str] = num_self_attention_heads lowercase : List[str] = num_cross_attention_heads lowercase : int = qk_channels lowercase : List[Any] = v_channels lowercase : int = cross_attention_shape_for_attention lowercase : Tuple = self_attention_widening_factor lowercase : Dict = cross_attention_widening_factor lowercase : Any = hidden_act lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : Union[str, Any] = initializer_range lowercase : Any = layer_norm_eps lowercase : Any = use_query_residual # masked language modeling attributes lowercase : List[str] = vocab_size lowercase : Dict = max_position_embeddings # image classification attributes lowercase : int = image_size # flow attributes lowercase : List[Any] = train_size # multimodal autoencoding attributes lowercase : List[Any] = num_frames lowercase : Union[str, Any] = audio_samples_per_frame lowercase : int = samples_per_patch lowercase : Optional[int] = output_shape class __SCREAMING_SNAKE_CASE ( A__ ): @property def __lowerCamelCase ( self ): if self.task == "multiple-choice": lowercase : Tuple = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowercase : Dict = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''inputs''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] ) @property def __lowerCamelCase ( self ): return 1E-4 def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = -1 , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = 3 , SCREAMING_SNAKE_CASE__ = 40 , SCREAMING_SNAKE_CASE__ = 40 , ): # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : str = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase : Union[str, Any] = preprocessor.num_special_tokens_to_add(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = compute_effective_axis_dimension( SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=SCREAMING_SNAKE_CASE__ ) # Generate dummy inputs according to compute batch and sequence lowercase : Optional[Any] = [''' '''.join(['''a'''] ) * seq_length] * batch_size lowercase : Any = dict(preprocessor(SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''input_ids''' ) return inputs elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase : List[str] = compute_effective_axis_dimension(SCREAMING_SNAKE_CASE__ , fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase : List[str] = self._generate_dummy_images(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = dict(preprocessor(images=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ ) ) lowercase : Union[str, Any] = inputs.pop('''pixel_values''' ) return inputs else: raise ValueError( '''Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.''' )
337
1
import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase=None, _UpperCamelCase=None ) ->List[Any]: """simple docstring""" return field(default_factory=lambda: default, metadata=_UpperCamelCase ) @dataclass class __SCREAMING_SNAKE_CASE : A : List[str] = list_field( default=[] , metadata={ 'help': ( 'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version' ' of all available models' ) } , ) A : List[int] = list_field( default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} ) A : List[int] = list_field( default=[8, 32, 128, 512] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , ) A : bool = field( default=A__ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , ) A : bool = field( default=A__ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , ) A : bool = field( default=A__ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} ) A : bool = field(default=A__ , metadata={'help': 'Use FP16 to accelerate inference.'} ) A : bool = field(default=A__ , metadata={'help': 'Benchmark training of model'} ) A : bool = field(default=A__ , metadata={'help': 'Verbose memory tracing'} ) A : bool = field( default=A__ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , ) A : bool = field( default=A__ , metadata={ 'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory' } , ) A : bool = field(default=A__ , metadata={'help': 'Trace memory line by line'} ) A : bool = field(default=A__ , metadata={'help': 'Save result to a CSV file'} ) A : bool = field(default=A__ , metadata={'help': 'Save all print statements in a log file'} ) A : bool = field(default=A__ , metadata={'help': 'Whether to print environment information'} ) A : bool = field( default=A__ , metadata={ 'help': ( 'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use' ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled' ' for debugging / testing and on TPU.' ) } , ) A : str = field( default=F'inference_time_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving time results to csv.'} , ) A : str = field( default=F'inference_memory_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving memory results to csv.'} , ) A : str = field( default=F'train_time_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , ) A : str = field( default=F'train_memory_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , ) A : str = field( default=F'env_info_{round(time() )}.csv' , metadata={'help': 'CSV filename used if saving environment information.'} , ) A : str = field( default=F'log_{round(time() )}.csv' , metadata={'help': 'Log filename used if print statements are saved in log.'} , ) A : int = field(default=3 , metadata={'help': 'Times an experiment will be run.'} ) A : bool = field( default=A__ , metadata={ 'help': ( 'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain' ' model weights.' ) } , ) def __lowerCamelCase ( self ): warnings.warn( f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils""" ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , SCREAMING_SNAKE_CASE__ , ) def __lowerCamelCase ( self ): return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __lowerCamelCase ( self ): if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def __lowerCamelCase ( self ): if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
337
import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def __lowercase ( _UpperCamelCase = 8 ) ->str: """simple docstring""" lowercase : List[str] = ascii_letters + digits + punctuation return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" i -= len(_UpperCamelCase ) lowercase : Dict = i // 3 lowercase : List[str] = i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) lowercase : Union[str, Any] = ( chars_incl + random(_UpperCamelCase, quotient + remainder ) + random(_UpperCamelCase, _UpperCamelCase ) + random(_UpperCamelCase, _UpperCamelCase ) ) lowercase : Union[str, Any] = list(_UpperCamelCase ) shuffle(_UpperCamelCase ) return "".join(_UpperCamelCase ) # random is a generalised function for letters, characters and numbers def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->str: """simple docstring""" return "".join(secrets.choice(_UpperCamelCase ) for _ in range(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Dict: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]: """simple docstring""" pass # Put your code here... def __lowercase ( _UpperCamelCase, _UpperCamelCase = 8 ) ->bool: """simple docstring""" if len(_UpperCamelCase ) < min_length: # Your Password must be at least 8 characters long return False lowercase : str = any(char in ascii_uppercase for char in password ) lowercase : List[str] = any(char in ascii_lowercase for char in password ) lowercase : Dict = any(char in digits for char in password ) lowercase : Tuple = any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def __lowercase ( ) ->Dict: """simple docstring""" lowercase : Union[str, Any] = int(input('''Please indicate the max length of your password: ''' ).strip() ) lowercase : Optional[Any] = input( '''Please indicate the characters that must be in your password: ''' ).strip() print('''Password generated:''', password_generator(_UpperCamelCase ) ) print( '''Alternative Password generated:''', alternative_password_generator(_UpperCamelCase, _UpperCamelCase ), ) print('''[If you are thinking of using this passsword, You better save it.]''' ) if __name__ == "__main__": main()
337
1
import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {'''vocab_file''': '''vocab.json'''} __a = { '''vocab_file''': { '''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''', } } __a = {'''mgp-str''': 27} class __SCREAMING_SNAKE_CASE ( A__ ): A : Optional[int] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="[GO]" , SCREAMING_SNAKE_CASE__="[GO]" , SCREAMING_SNAKE_CASE__="[s]" , SCREAMING_SNAKE_CASE__="[GO]" , **SCREAMING_SNAKE_CASE__ ): super().__init__( unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: lowercase : Dict = json.load(SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = {v: k for k, v in self.vocab.items()} @property def __lowerCamelCase ( self ): return len(self.vocab ) def __lowerCamelCase ( self ): return dict(self.vocab , **self.added_tokens_encoder ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = [] for s in text: char_tokens.extend(SCREAMING_SNAKE_CASE__ ) return char_tokens def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return self.vocab.get(SCREAMING_SNAKE_CASE__ , self.vocab.get(self.unk_token ) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return self.decoder.get(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) ) return lowercase : str = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) return (vocab_file,)
337
from __future__ import annotations __a = [] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" for i in range(len(_UpperCamelCase ) ): if board[row][i] == 1: return False for i in range(len(_UpperCamelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, -1, -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_UpperCamelCase, -1, -1 ), range(_UpperCamelCase, len(_UpperCamelCase ) ) ): if board[i][j] == 1: return False return True def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->bool: """simple docstring""" if row >= len(_UpperCamelCase ): solution.append(_UpperCamelCase ) printboard(_UpperCamelCase ) print() return True for i in range(len(_UpperCamelCase ) ): if is_safe(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : int = 1 solve(_UpperCamelCase, row + 1 ) lowercase : Tuple = 0 return False def __lowercase ( _UpperCamelCase ) ->None: """simple docstring""" for i in range(len(_UpperCamelCase ) ): for j in range(len(_UpperCamelCase ) ): if board[i][j] == 1: print('''Q''', end=''' ''' ) else: print('''.''', end=''' ''' ) print() # n=int(input("The no. of queens")) __a = 8 __a = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('''The total no. of solutions are :''', len(solution))
337
1
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __a = '''src/diffusers''' __a = '''.''' # This is to make sure the diffusers module imported is the one in the repo. __a = importlib.util.spec_from_file_location( '''diffusers''', os.path.join(DIFFUSERS_PATH, '''__init__.py'''), submodule_search_locations=[DIFFUSERS_PATH], ) __a = spec.loader.load_module() def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[Any]: """simple docstring""" return line.startswith(_UpperCamelCase ) or len(_UpperCamelCase ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''', _UpperCamelCase ) is not None def __lowercase ( _UpperCamelCase ) ->Optional[Any]: """simple docstring""" lowercase : List[str] = object_name.split('''.''' ) lowercase : List[str] = 0 # First let's find the module where our object lives. lowercase : Dict = parts[i] while i < len(_UpperCamelCase ) and not os.path.isfile(os.path.join(_UpperCamelCase, f"""{module}.py""" ) ): i += 1 if i < len(_UpperCamelCase ): lowercase : int = os.path.join(_UpperCamelCase, parts[i] ) if i >= len(_UpperCamelCase ): raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" ) with open(os.path.join(_UpperCamelCase, f"""{module}.py""" ), '''r''', encoding='''utf-8''', newline='''\n''' ) as f: lowercase : List[Any] = f.readlines() # Now let's find the class / func in the code! lowercase : Optional[Any] = '''''' lowercase : Optional[Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(_UpperCamelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_UpperCamelCase ): raise ValueError(f""" {object_name} does not match any function or class in {module}.""" ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowercase : Tuple = line_index while line_index < len(_UpperCamelCase ) and _should_continue(lines[line_index], _UpperCamelCase ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowercase : str = lines[start_index:line_index] return "".join(_UpperCamelCase ) __a = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''') __a = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''') __a = re.compile(r'''<FILL\s+[^>]*>''') def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : str = code.split('''\n''' ) lowercase : str = 0 while idx < len(_UpperCamelCase ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_UpperCamelCase ): return re.search(R'''^(\s*)\S''', lines[idx] ).groups()[0] return "" def __lowercase ( _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = len(get_indent(_UpperCamelCase ) ) > 0 if has_indent: lowercase : Dict = f"""class Bla:\n{code}""" lowercase : Dict = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119, preview=_UpperCamelCase ) lowercase : List[Any] = black.format_str(_UpperCamelCase, mode=_UpperCamelCase ) lowercase , lowercase : str = style_docstrings_in_code(_UpperCamelCase ) return result[len('''class Bla:\n''' ) :] if has_indent else result def __lowercase ( _UpperCamelCase, _UpperCamelCase=False ) ->Optional[Any]: """simple docstring""" with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f: lowercase : List[Any] = f.readlines() lowercase : Tuple = [] lowercase : List[Any] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(_UpperCamelCase ): lowercase : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowercase , lowercase , lowercase : Any = search.groups() lowercase : Any = find_code_in_diffusers(_UpperCamelCase ) lowercase : Dict = get_indent(_UpperCamelCase ) lowercase : Optional[int] = line_index + 1 if indent == theoretical_indent else line_index + 2 lowercase : int = theoretical_indent lowercase : Union[str, Any] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowercase : Optional[Any] = True while line_index < len(_UpperCamelCase ) and should_continue: line_index += 1 if line_index >= len(_UpperCamelCase ): break lowercase : List[Any] = lines[line_index] lowercase : str = _should_continue(_UpperCamelCase, _UpperCamelCase ) and re.search(f"""^{indent}# End copy""", _UpperCamelCase ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowercase : List[str] = lines[start_index:line_index] lowercase : Tuple = ''''''.join(_UpperCamelCase ) # Remove any nested `Copied from` comments to avoid circular copies lowercase : Dict = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(_UpperCamelCase ) is None] lowercase : Dict = '''\n'''.join(_UpperCamelCase ) # Before comparing, use the `replace_pattern` on the original code. if len(_UpperCamelCase ) > 0: lowercase : Any = replace_pattern.replace('''with''', '''''' ).split(''',''' ) lowercase : Dict = [_re_replace_pattern.search(_UpperCamelCase ) for p in patterns] for pattern in patterns: if pattern is None: continue lowercase , lowercase , lowercase : Any = pattern.groups() lowercase : int = re.sub(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) if option.strip() == "all-casing": lowercase : List[str] = re.sub(obja.lower(), obja.lower(), _UpperCamelCase ) lowercase : Optional[int] = re.sub(obja.upper(), obja.upper(), _UpperCamelCase ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowercase : List[Any] = blackify(lines[start_index - 1] + theoretical_code ) lowercase : List[str] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: lowercase : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] lowercase : Optional[Any] = start_index + 1 if overwrite and len(_UpperCamelCase ) > 0: # Warn the user a file has been modified. print(f"""Detected changes, rewriting {filename}.""" ) with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f: f.writelines(_UpperCamelCase ) return diffs def __lowercase ( _UpperCamelCase = False ) ->Any: """simple docstring""" lowercase : Dict = glob.glob(os.path.join(_UpperCamelCase, '''**/*.py''' ), recursive=_UpperCamelCase ) lowercase : Dict = [] for filename in all_files: lowercase : Any = is_copy_consistent(_UpperCamelCase, _UpperCamelCase ) diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs] if not overwrite and len(_UpperCamelCase ) > 0: lowercase : Tuple = '''\n'''.join(_UpperCamelCase ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') __a = parser.parse_args() check_copies(args.fix_and_overwrite)
337
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
1
from collections import Counter import numpy as np from sklearn import datasets from sklearn.model_selection import train_test_split __a = datasets.load_iris() __a = np.array(data['''data''']) __a = np.array(data['''target''']) __a = data['''target_names'''] __a , __a , __a , __a = train_test_split(X, y) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Optional[Any]: """simple docstring""" return np.linalg.norm(np.array(_UpperCamelCase ) - np.array(_UpperCamelCase ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=5 ) ->str: """simple docstring""" lowercase : Optional[int] = zip(_UpperCamelCase, _UpperCamelCase ) # List of distances of all points from the point to be classified lowercase : List[str] = [] for data_point in data: lowercase : List[Any] = euclidean_distance(data_point[0], _UpperCamelCase ) distances.append((distance, data_point[1]) ) # Choosing 'k' points with the least distances. lowercase : Optional[int] = [i[1] for i in sorted(_UpperCamelCase )[:k]] # Most commonly occurring class among them # is the class into which the point is classified lowercase : Dict = Counter(_UpperCamelCase ).most_common(1 )[0][0] return classes[result] if __name__ == "__main__": print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
337
from collections.abc import Callable class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ = None ): # Stores actual heap items. lowercase : list = [] # Stores indexes of each item for supporting updates and deletion. lowercase : dict = {} # Stores current size of heap. lowercase : str = 0 # Stores function used to evaluate the score of an item on which basis ordering # will be done. lowercase : Tuple = key or (lambda SCREAMING_SNAKE_CASE__ : x) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): return int((i - 1) / 2 ) if i > 0 else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 1 ) return left if 0 < left < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = int(2 * i + 2 ) return right if 0 < right < self.size else None def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : Dict = ( self.pos_map[self.arr[j][0]], self.pos_map[self.arr[i][0]], ) # Then swap the items in the list. lowercase , lowercase : int = self.arr[j], self.arr[i] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.arr[i][1] < self.arr[j][1] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : int = self._left(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self._right(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = i if left is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = left if right is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : List[str] = right return valid_parent def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = self._parent(SCREAMING_SNAKE_CASE__ ) while parent is not None and not self._cmp(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : Optional[int] = parent, self._parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Dict = self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) while valid_parent != index: self._swap(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) lowercase , lowercase : str = valid_parent, self._get_valid_parent(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : str = self.pos_map[item] lowercase : Optional[int] = [item, self.key(SCREAMING_SNAKE_CASE__ )] # Make sure heap is right in both up and down direction. # Ideally only one of them will make any change. self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): if item not in self.pos_map: return lowercase : List[str] = self.pos_map[item] del self.pos_map[item] lowercase : Optional[int] = self.arr[self.size - 1] lowercase : int = index self.size -= 1 # Make sure heap is right in both up and down direction. Ideally only one # of them will make any change- so no performance loss in calling both. if self.size > index: self._heapify_up(SCREAMING_SNAKE_CASE__ ) self._heapify_down(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = len(self.arr ) if arr_len == self.size: self.arr.append([item, self.key(SCREAMING_SNAKE_CASE__ )] ) else: lowercase : int = [item, self.key(SCREAMING_SNAKE_CASE__ )] lowercase : str = self.size self.size += 1 self._heapify_up(self.size - 1 ) def __lowerCamelCase ( self ): return self.arr[0] if self.size else None def __lowerCamelCase ( self ): lowercase : str = self.get_top() if top_item_tuple: self.delete_item(top_item_tuple[0] ) return top_item_tuple def __lowercase ( ) ->None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
337
1
from typing import List from .keymap import KEYMAP, get_character def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += [key] setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator def __lowercase ( *_UpperCamelCase ) ->Any: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += keys setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator class __SCREAMING_SNAKE_CASE ( A__ ): def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ): setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} ) setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] ) for key in handled_keys: lowercase : List[Any] = value return new_cls @staticmethod def __lowerCamelCase ( cls ): lowercase : Dict = get_character() if char != KEYMAP["undefined"]: lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ ) if handler: lowercase : Tuple = char return handler(cls ) else: return None def __lowercase ( cls ) ->Any: """simple docstring""" return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
337
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class __SCREAMING_SNAKE_CASE ( A__ ): A : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
337
1
import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __a = '''▁''' __a = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): A : List[Any] = BertGenerationTokenizer A : Tuple = False A : List[str] = True def __lowerCamelCase ( self ): super().setUp() lowercase : str = BertGenerationTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowerCamelCase ( self ): lowercase : int = '''<s>''' lowercase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<pad>''' ) self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , 1002 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1000 ) def __lowerCamelCase ( self ): lowercase : List[Any] = BertGenerationTokenizer(SCREAMING_SNAKE_CASE__ , keep_accents=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(SCREAMING_SNAKE_CASE__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) , [285, 46, 10, 170, 382] , ) lowercase : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowercase : List[Any] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ) self.assertListEqual( SCREAMING_SNAKE_CASE__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) @cached_property def __lowerCamelCase ( self ): return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) @slow def __lowerCamelCase ( self ): lowercase : int = '''Hello World!''' lowercase : Optional[Any] = [18536, 2260, 101] self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) @slow def __lowerCamelCase ( self ): lowercase : Dict = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) lowercase : Any = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(SCREAMING_SNAKE_CASE__ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) @require_torch @slow def __lowerCamelCase ( self ): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence lowercase : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[int] = self.big_tokenizer.batch_encode_plus( [sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = BertGenerationConfig() lowercase : Union[str, Any] = BertGenerationEncoder(SCREAMING_SNAKE_CASE__ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**SCREAMING_SNAKE_CASE__ ) model(**SCREAMING_SNAKE_CASE__ ) @slow def __lowerCamelCase ( self ): # fmt: off lowercase : Optional[int] = {'''input_ids''': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE__ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , )
337
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __a = logging.get_logger(__name__) __a = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __a = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } __a = {'''facebook/blenderbot-3B''': 1_28} class __SCREAMING_SNAKE_CASE ( A__ ): A : Dict = VOCAB_FILES_NAMES A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP A : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[int] = ['input_ids', 'attention_mask'] A : str = BlenderbotTokenizer def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ): super().__init__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowercase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop('''type''' ) ) lowercase : str = add_prefix_space lowercase : List[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = add_prefix_space lowercase : str = '''post_processor''' lowercase : str = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if tokenizer_component_instance: lowercase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase : Tuple = tuple(state['''sep'''] ) if "cls" in state: lowercase : Union[str, Any] = tuple(state['''cls'''] ) lowercase : Optional[int] = False if state.get('''add_prefix_space''' , SCREAMING_SNAKE_CASE__ ) != add_prefix_space: lowercase : Any = add_prefix_space lowercase : Tuple = True if state.get('''trim_offsets''' , SCREAMING_SNAKE_CASE__ ) != trim_offsets: lowercase : List[str] = trim_offsets lowercase : Optional[int] = True if changes_to_apply: lowercase : Union[str, Any] = getattr(SCREAMING_SNAKE_CASE__ , state.pop('''type''' ) ) lowercase : Union[str, Any] = component_class(**SCREAMING_SNAKE_CASE__ ) setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def __lowerCamelCase ( self ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value lowercase : Any = value def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Dict = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): lowercase : Any = kwargs.get('''is_split_into_words''' , SCREAMING_SNAKE_CASE__ ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : int = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Tuple = [self.sep_token_id] lowercase : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): return token_ids_a + [self.eos_token_id] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ ): lowercase : Any = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = ''' '''.join(SCREAMING_SNAKE_CASE__ ) lowercase : Any = self.encode(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > self.model_max_length: lowercase : Tuple = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
337
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __a = { '''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''], '''tokenization_ctrl''': ['''CTRLTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CTRLForSequenceClassification''', '''CTRLLMHeadModel''', '''CTRLModel''', '''CTRLPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ '''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFCTRLForSequenceClassification''', '''TFCTRLLMHeadModel''', '''TFCTRLModel''', '''TFCTRLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
337
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __lowercase ( ) ->int: """simple docstring""" lowercase : Tuple = HfArgumentParser(_UpperCamelCase ) lowercase : List[str] = parser.parse_args_into_dataclasses()[0] lowercase : Optional[int] = TensorFlowBenchmark(args=_UpperCamelCase ) try: lowercase : Any = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase : Any = ''' '''.join(str(_UpperCamelCase ).split(''' ''' )[:-1] ) lowercase : Any = '''''' lowercase : str = eval(str(_UpperCamelCase ).split(''' ''' )[-1] ) lowercase : List[str] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(_UpperCamelCase ) raise ValueError(_UpperCamelCase ) benchmark.run() if __name__ == "__main__": main()
337
1
import math def __lowercase ( _UpperCamelCase ) ->bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5, int(math.sqrt(_UpperCamelCase ) + 1 ), 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __lowercase ( _UpperCamelCase = 0.1 ) ->int: """simple docstring""" lowercase : int = 3 lowercase : int = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1 ): primes += is_prime(_UpperCamelCase ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
337
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : Union[str, Any] = [False] * len(_UpperCamelCase ) lowercase : Optional[int] = [] queue.append(_UpperCamelCase ) lowercase : Union[str, Any] = True while queue: lowercase : List[str] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(_UpperCamelCase ) lowercase : Tuple = True lowercase : Optional[Any] = u return visited[t] def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" lowercase : List[str] = [-1] * (len(_UpperCamelCase )) lowercase : int = 0 while bfs(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ): lowercase : List[str] = float('''Inf''' ) lowercase : int = sink while s != source: # Find the minimum value in select path lowercase : List[Any] = min(_UpperCamelCase, graph[parent[s]][s] ) lowercase : Union[str, Any] = parent[s] max_flow += path_flow lowercase : Optional[int] = sink while v != source: lowercase : Any = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowercase : Union[str, Any] = parent[v] return max_flow __a = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __a , __a = 0, 5 print(ford_fulkerson(graph, source, sink))
337
1
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py __a = '''\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation", author = "Lin, Chin-Yew and Och, Franz Josef", booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics", month = "aug 23{--}aug 27", year = "2004", address = "Geneva, Switzerland", publisher = "COLING", url = "https://www.aclweb.org/anthology/C04-1072", pages = "501--507", } ''' __a = '''\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation, the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. ''' __a = ''' Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: \'bleu\': bleu score, \'precisions\': geometric mean of n-gram precisions, \'brevity_penalty\': brevity penalty, \'length_ratio\': ratio of lengths, \'translation_length\': translation_length, \'reference_length\': reference_length Examples: >>> predictions = [ ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample ... ] >>> references = [ ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references) ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference) ... ] >>> bleu = datasets.load_metric("bleu") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results["bleu"]) 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __SCREAMING_SNAKE_CASE ( datasets.Metric ): def __lowerCamelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=False ): lowercase : Optional[int] = compute_bleu( reference_corpus=SCREAMING_SNAKE_CASE__ , translation_corpus=SCREAMING_SNAKE_CASE__ , max_order=SCREAMING_SNAKE_CASE__ , smooth=SCREAMING_SNAKE_CASE__ ) ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : List[Any] = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
337
from typing import List from .keymap import KEYMAP, get_character def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : str = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += [key] setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator def __lowercase ( *_UpperCamelCase ) ->Any: """simple docstring""" def decorator(_UpperCamelCase ): lowercase : List[Any] = getattr(_UpperCamelCase, '''handle_key''', [] ) handle += keys setattr(_UpperCamelCase, '''handle_key''', _UpperCamelCase ) return func return decorator class __SCREAMING_SNAKE_CASE ( A__ ): def __new__( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : str = super().__new__(cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if not hasattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' ): setattr(SCREAMING_SNAKE_CASE__ , '''key_handler''' , {} ) setattr(SCREAMING_SNAKE_CASE__ , '''handle_input''' , KeyHandler.handle_input ) for value in attrs.values(): lowercase : Dict = getattr(SCREAMING_SNAKE_CASE__ , '''handle_key''' , [] ) for key in handled_keys: lowercase : List[Any] = value return new_cls @staticmethod def __lowerCamelCase ( cls ): lowercase : Dict = get_character() if char != KEYMAP["undefined"]: lowercase : Optional[int] = ord(SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = cls.key_handler.get(SCREAMING_SNAKE_CASE__ ) if handler: lowercase : Tuple = char return handler(cls ) else: return None def __lowercase ( cls ) ->Any: """simple docstring""" return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy() )
337
1
from __future__ import annotations class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = TypeError( '''Matrices must be formed from a list of zero or more lists containing at ''' '''least one and the same number of values, each of which must be of type ''' '''int or float.''' ) if len(SCREAMING_SNAKE_CASE__ ) != 0: lowercase : int = len(rows[0] ) if cols == 0: raise error for row in rows: if len(SCREAMING_SNAKE_CASE__ ) != cols: raise error for value in row: if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): raise error lowercase : List[Any] = rows else: lowercase : List[str] = [] def __lowerCamelCase ( self ): return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def __lowerCamelCase ( self ): return len(self.rows ) @property def __lowerCamelCase ( self ): return len(self.rows[0] ) @property def __lowerCamelCase ( self ): return (self.num_rows, self.num_columns) @property def __lowerCamelCase ( self ): return self.order[0] == self.order[1] def __lowerCamelCase ( self ): lowercase : Dict = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def __lowerCamelCase ( self ): return bool(self.determinant() ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(SCREAMING_SNAKE_CASE__ ).determinant() def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if (row + column) % 2 == 0: return self.get_minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return -1 * self.get_minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): return Matrix( [ [self.get_minor(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def __lowerCamelCase ( self ): return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def __lowerCamelCase ( self ): lowercase : int = [ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Dict = self.determinant() if not determinant: raise TypeError('''Only matrices with a non-zero determinant have an inverse''' ) return self.adjugate() * (1 / determinant) def __repr__( self ): return str(self.rows ) def __str__( self ): if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ '''[''' + '''. '''.join([str(SCREAMING_SNAKE_CASE__ ) for value in row] ) + '''.]''' for row in self.rows ] ) + "]" ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : str = TypeError('''Row must be a list containing all ints and/or floats''' ) if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise type_error for value in row: if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): raise type_error if len(SCREAMING_SNAKE_CASE__ ) != self.num_columns: raise ValueError( '''Row must be equal in length to the other rows in the matrix''' ) if position is None: self.rows.append(SCREAMING_SNAKE_CASE__ ) else: lowercase : Union[str, Any] = self.rows[0:position] + [row] + self.rows[position:] def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ): lowercase : Dict = TypeError( '''Column must be a list containing all ints and/or floats''' ) if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise type_error for value in column: if not isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): raise type_error if len(SCREAMING_SNAKE_CASE__ ) != self.num_rows: raise ValueError( '''Column must be equal in length to the other columns in the matrix''' ) if position is None: lowercase : Any = [self.rows[i] + [column[i]] for i in range(self.num_rows )] else: lowercase : Optional[Any] = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , SCREAMING_SNAKE_CASE__ ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return NotImplemented return self.rows == other.rows def __ne__( self , SCREAMING_SNAKE_CASE__ ): return not self == other def __neg__( self ): return self * -1 def __add__( self , SCREAMING_SNAKE_CASE__ ): if self.order != other.order: raise ValueError('''Addition requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , SCREAMING_SNAKE_CASE__ ): if self.order != other.order: raise ValueError('''Subtraction requires matrices of the same order''' ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , SCREAMING_SNAKE_CASE__ ): if isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if self.num_columns != other.num_rows: raise ValueError( '''The number of columns in the first matrix must ''' '''be equal to the number of rows in the second''' ) return Matrix( [ [Matrix.dot_product(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( '''A Matrix can only be multiplied by an int, float, or another matrix''' ) def __pow__( self , SCREAMING_SNAKE_CASE__ ): if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): raise TypeError('''A Matrix can only be raised to the power of an int''' ) if not self.is_square: raise ValueError('''Only square matrices can be raised to a power''' ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( '''Only invertable matrices can be raised to a negative power''' ) lowercase : int = self for _ in range(other - 1 ): result *= self return result @classmethod def __lowerCamelCase ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return sum(row[i] * column[i] for i in range(len(SCREAMING_SNAKE_CASE__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
337
import logging import os from .state import PartialState class __SCREAMING_SNAKE_CASE ( logging.LoggerAdapter ): @staticmethod def __lowerCamelCase ( SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = PartialState() return not main_process_only or (main_process_only and state.is_main_process) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): if PartialState._shared_state == {}: raise RuntimeError( '''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' ) lowercase : List[str] = kwargs.pop('''main_process_only''' , SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = kwargs.pop('''in_order''' , SCREAMING_SNAKE_CASE__ ) if self.isEnabledFor(SCREAMING_SNAKE_CASE__ ): if self._should_log(SCREAMING_SNAKE_CASE__ ): lowercase , lowercase : str = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) elif in_order: lowercase : List[Any] = PartialState() for i in range(state.num_processes ): if i == state.process_index: lowercase , lowercase : Union[str, Any] = self.process(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) self.logger.log(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) state.wait_for_everyone() def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->List[Any]: """simple docstring""" if log_level is None: lowercase : str = os.environ.get('''ACCELERATE_LOG_LEVEL''', _UpperCamelCase ) lowercase : str = logging.getLogger(_UpperCamelCase ) if log_level is not None: logger.setLevel(log_level.upper() ) logger.root.setLevel(log_level.upper() ) return MultiProcessAdapter(_UpperCamelCase, {} )
337
1
from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = "arrow" , **SCREAMING_SNAKE_CASE__ , ): super().__init__( split=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ , streaming=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) lowercase : Union[str, Any] = load_from_cache_file lowercase : Optional[Any] = file_format lowercase : int = Spark( df=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , working_dir=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) def __lowerCamelCase ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowercase : Optional[Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=SCREAMING_SNAKE_CASE__ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
337
import argparse import pytorch_lightning as pl import torch from torch import nn from transformers import LongformerForQuestionAnswering, LongformerModel class __SCREAMING_SNAKE_CASE ( pl.LightningModule ): def __init__( self , SCREAMING_SNAKE_CASE__ ): super().__init__() lowercase : Any = model lowercase : Optional[Any] = 2 lowercase : Optional[int] = nn.Linear(self.model.config.hidden_size , self.num_labels ) def __lowerCamelCase ( self ): pass def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : str = LongformerModel.from_pretrained(_UpperCamelCase ) lowercase : int = LightningModel(_UpperCamelCase ) lowercase : Union[str, Any] = torch.load(_UpperCamelCase, map_location=torch.device('''cpu''' ) ) lightning_model.load_state_dict(ckpt['''state_dict'''] ) # init longformer question answering model lowercase : List[Any] = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase ) # transfer weights longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() ) longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() ) longformer_for_qa.eval() # save model longformer_for_qa.save_pretrained(_UpperCamelCase ) print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--longformer_model''', default=None, type=str, required=True, help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''', ) parser.add_argument( '''--longformer_question_answering_ckpt_path''', default=None, type=str, required=True, help='''Path the official PyTorch Lightning Checkpoint.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) __a = parser.parse_args() convert_longformer_qa_checkpoint_to_pytorch( args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path )
337
1
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" while a != 0: lowercase , lowercase : int = b % a, a return b def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" if gcd(_UpperCamelCase, _UpperCamelCase ) != 1: lowercase : Tuple = f"""mod inverse of {a!r} and {m!r} does not exist""" raise ValueError(_UpperCamelCase ) lowercase , lowercase , lowercase : Union[str, Any] = 1, 0, a lowercase , lowercase , lowercase : int = 0, 1, m while va != 0: lowercase : Optional[Any] = ua // va lowercase , lowercase , lowercase , lowercase , lowercase , lowercase : Optional[int] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
337
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { '''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''', # See all YOLOS models at https://huggingface.co/models?filter=yolos } class __SCREAMING_SNAKE_CASE ( A__ ): A : Any = 'yolos' def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=[512, 864] , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , **SCREAMING_SNAKE_CASE__ , ): super().__init__(**SCREAMING_SNAKE_CASE__ ) lowercase : Union[str, Any] = hidden_size lowercase : int = num_hidden_layers lowercase : str = num_attention_heads lowercase : str = intermediate_size lowercase : Dict = hidden_act lowercase : int = hidden_dropout_prob lowercase : Optional[Any] = attention_probs_dropout_prob lowercase : List[Any] = initializer_range lowercase : Optional[int] = layer_norm_eps lowercase : str = image_size lowercase : Dict = patch_size lowercase : str = num_channels lowercase : Optional[int] = qkv_bias lowercase : List[str] = num_detection_tokens lowercase : List[str] = use_mid_position_embeddings lowercase : Dict = auxiliary_loss # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : Any = bbox_cost lowercase : int = giou_cost # Loss coefficients lowercase : Dict = bbox_loss_coefficient lowercase : Optional[Any] = giou_loss_coefficient lowercase : Tuple = eos_coefficient class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = version.parse('1.11' ) @property def __lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCamelCase ( self ): return 1E-4 @property def __lowerCamelCase ( self ): return 12
337
1
def __lowercase ( _UpperCamelCase = 100 ) ->int: """simple docstring""" lowercase : List[Any] = (n * (n + 1) // 2) ** 2 lowercase : Any = n * (n + 1) * (2 * n + 1) // 6 return sum_cubes - sum_squares if __name__ == "__main__": print(F'''{solution() = }''')
337
import importlib.metadata import operator import re import sys from typing import Optional from packaging import version __a = { '''<''': operator.lt, '''<=''': operator.le, '''==''': operator.eq, '''!=''': operator.ne, '''>=''': operator.ge, '''>''': operator.gt, } def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Optional[int]: """simple docstring""" if got_ver is None or want_ver is None: raise ValueError( f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider""" f""" reinstalling {pkg}.""" ) if not ops[op](version.parse(_UpperCamelCase ), version.parse(_UpperCamelCase ) ): raise ImportError( f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" ) def __lowercase ( _UpperCamelCase, _UpperCamelCase = None ) ->None: """simple docstring""" lowercase : List[Any] = f"""\n{hint}""" if hint is not None else '''''' # non-versioned check if re.match(R'''^[\w_\-\d]+$''', _UpperCamelCase ): lowercase , lowercase , lowercase : Optional[Any] = requirement, None, None else: lowercase : List[Any] = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but''' f""" got {requirement}""" ) lowercase , lowercase : str = match[0] lowercase : Tuple = want_full.split(''',''' ) # there could be multiple requirements lowercase : List[Any] = {} for w in want_range: lowercase : str = re.findall(R'''^([\s!=<>]{1,2})(.+)''', _UpperCamelCase ) if not match: raise ValueError( '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,''' f""" but got {requirement}""" ) lowercase , lowercase : Optional[int] = match[0] lowercase : Dict = want_ver if op not in ops: raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" ) # special case if pkg == "python": lowercase : int = '''.'''.join([str(_UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) return # check if any version is installed try: lowercase : List[str] = importlib.metadata.version(_UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->int: """simple docstring""" lowercase : Optional[int] = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main''' return require_version(_UpperCamelCase, _UpperCamelCase )
337
1
import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __a = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) lowercase : Any = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCamelCase ( self ): lowercase : List[Any] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with a really long name lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
337
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a = logging.get_logger(__name__) __a = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'deta' A : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = backbone_config.pop('''model_type''' ) lowercase : Any = CONFIG_MAPPING[backbone_model_type] lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = backbone_config lowercase : Union[str, Any] = num_queries lowercase : Any = max_position_embeddings lowercase : int = d_model lowercase : Any = encoder_ffn_dim lowercase : Optional[int] = encoder_layers lowercase : Tuple = encoder_attention_heads lowercase : Optional[Any] = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : int = decoder_attention_heads lowercase : Any = dropout lowercase : int = attention_dropout lowercase : Dict = activation_dropout lowercase : int = activation_function lowercase : Dict = init_std lowercase : List[str] = init_xavier_std lowercase : Optional[Any] = encoder_layerdrop lowercase : Tuple = auxiliary_loss lowercase : Tuple = position_embedding_type # deformable attributes lowercase : List[str] = num_feature_levels lowercase : Tuple = encoder_n_points lowercase : Optional[int] = decoder_n_points lowercase : Tuple = two_stage lowercase : Optional[Any] = two_stage_num_proposals lowercase : Union[str, Any] = with_box_refine lowercase : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : str = bbox_cost lowercase : List[Any] = giou_cost # Loss coefficients lowercase : Tuple = mask_loss_coefficient lowercase : Any = dice_loss_coefficient lowercase : Dict = bbox_loss_coefficient lowercase : Tuple = giou_loss_coefficient lowercase : Union[str, Any] = eos_coefficient lowercase : Tuple = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return self.encoder_attention_heads @property def __lowerCamelCase ( self ): return self.d_model def __lowerCamelCase ( self ): lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Any = self.backbone_config.to_dict() lowercase : List[str] = self.__class__.model_type return output
337
1
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING __a = logging.get_logger(__name__) __a = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class __SCREAMING_SNAKE_CASE ( A__ ): A : List[str] = 'deta' A : Dict = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=900 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=2048 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="relu" , SCREAMING_SNAKE_CASE__=256 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__="sine" , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=300 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.25 , **SCREAMING_SNAKE_CASE__ , ): if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowercase : Tuple = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] ) else: if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = backbone_config.pop('''model_type''' ) lowercase : Any = CONFIG_MAPPING[backbone_model_type] lowercase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = backbone_config lowercase : Union[str, Any] = num_queries lowercase : Any = max_position_embeddings lowercase : int = d_model lowercase : Any = encoder_ffn_dim lowercase : Optional[int] = encoder_layers lowercase : Tuple = encoder_attention_heads lowercase : Optional[Any] = decoder_ffn_dim lowercase : Optional[int] = decoder_layers lowercase : int = decoder_attention_heads lowercase : Any = dropout lowercase : int = attention_dropout lowercase : Dict = activation_dropout lowercase : int = activation_function lowercase : Dict = init_std lowercase : List[str] = init_xavier_std lowercase : Optional[Any] = encoder_layerdrop lowercase : Tuple = auxiliary_loss lowercase : Tuple = position_embedding_type # deformable attributes lowercase : List[str] = num_feature_levels lowercase : Tuple = encoder_n_points lowercase : Optional[int] = decoder_n_points lowercase : Tuple = two_stage lowercase : Optional[Any] = two_stage_num_proposals lowercase : Union[str, Any] = with_box_refine lowercase : Any = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('''If two_stage is True, with_box_refine must be True.''' ) # Hungarian matcher lowercase : Optional[Any] = class_cost lowercase : str = bbox_cost lowercase : List[Any] = giou_cost # Loss coefficients lowercase : Tuple = mask_loss_coefficient lowercase : Any = dice_loss_coefficient lowercase : Dict = bbox_loss_coefficient lowercase : Tuple = giou_loss_coefficient lowercase : Union[str, Any] = eos_coefficient lowercase : Tuple = focal_alpha super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) @property def __lowerCamelCase ( self ): return self.encoder_attention_heads @property def __lowerCamelCase ( self ): return self.d_model def __lowerCamelCase ( self ): lowercase : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase : Any = self.backbone_config.to_dict() lowercase : List[str] = self.__class__.model_type return output
337
def __lowercase ( ) ->List[Any]: """simple docstring""" lowercase : Union[str, Any] = 0 for i in range(1, 1001 ): total += i**i return str(_UpperCamelCase )[-10:] if __name__ == "__main__": print(solution())
337
1
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __a = { '''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''', '''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''', '''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''', '''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''', '''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''', '''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''', '''mask_downscaling.0''': '''mask_embed.conv1''', '''mask_downscaling.1''': '''mask_embed.layer_norm1''', '''mask_downscaling.3''': '''mask_embed.conv2''', '''mask_downscaling.4''': '''mask_embed.layer_norm2''', '''mask_downscaling.6''': '''mask_embed.conv3''', '''point_embeddings''': '''point_embed''', '''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''', '''image_encoder''': '''vision_encoder''', '''neck.0''': '''neck.conv1''', '''neck.1''': '''neck.layer_norm1''', '''neck.2''': '''neck.conv2''', '''neck.3''': '''neck.layer_norm2''', '''patch_embed.proj''': '''patch_embed.projection''', '''.norm''': '''.layer_norm''', '''blocks''': '''layers''', } def __lowercase ( _UpperCamelCase ) ->Tuple: """simple docstring""" lowercase : List[str] = {} state_dict.pop('''pixel_mean''', _UpperCamelCase ) state_dict.pop('''pixel_std''', _UpperCamelCase ) lowercase : List[Any] = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowercase : Any = key.replace(_UpperCamelCase, _UpperCamelCase ) if re.match(_UpperCamelCase, _UpperCamelCase ): lowercase : Dict = int(re.match(_UpperCamelCase, _UpperCamelCase ).group(2 ) ) if layer_nb == 0: lowercase : List[Any] = key.replace('''layers.0''', '''proj_in''' ) elif layer_nb == 1: lowercase : List[Any] = key.replace('''layers.1''', '''layers.0''' ) elif layer_nb == 2: lowercase : Optional[Any] = key.replace('''layers.2''', '''proj_out''' ) lowercase : Optional[Any] = value lowercase : Dict = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase="ybelkada/segment-anything" ) ->Optional[Any]: """simple docstring""" lowercase : List[Any] = hf_hub_download(_UpperCamelCase, f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: lowercase : Dict = SamConfig() elif "sam_vit_l" in model_name: lowercase : Optional[int] = SamVisionConfig( hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23], ) lowercase : Any = SamConfig( vision_config=_UpperCamelCase, ) elif "sam_vit_h" in model_name: lowercase : Union[str, Any] = SamVisionConfig( hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31], ) lowercase : List[str] = SamConfig( vision_config=_UpperCamelCase, ) lowercase : Dict = torch.load(_UpperCamelCase, map_location='''cpu''' ) lowercase : List[str] = replace_keys(_UpperCamelCase ) lowercase : Tuple = SamImageProcessor() lowercase : int = SamProcessor(image_processor=_UpperCamelCase ) lowercase : Optional[Any] = SamModel(_UpperCamelCase ) hf_model.load_state_dict(_UpperCamelCase ) lowercase : Dict = hf_model.to('''cuda''' ) lowercase : str = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' lowercase : int = Image.open(requests.get(_UpperCamelCase, stream=_UpperCamelCase ).raw ).convert('''RGB''' ) lowercase : Union[str, Any] = [[[400, 650]]] lowercase : List[Any] = [[1]] lowercase : List[str] = processor(images=np.array(_UpperCamelCase ), return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowercase : List[str] = hf_model(**_UpperCamelCase ) lowercase : Any = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 lowercase : int = processor( images=np.array(_UpperCamelCase ), input_points=_UpperCamelCase, input_labels=_UpperCamelCase, return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowercase : str = hf_model(**_UpperCamelCase ) lowercase : int = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 lowercase : Tuple = ((75, 275, 1725, 850),) lowercase : Dict = processor(images=np.array(_UpperCamelCase ), input_boxes=_UpperCamelCase, return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowercase : int = hf_model(**_UpperCamelCase ) lowercase : Dict = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. lowercase : Optional[Any] = [[[400, 650], [800, 650]]] lowercase : int = [[1, 1]] lowercase : Dict = processor( images=np.array(_UpperCamelCase ), input_points=_UpperCamelCase, input_labels=_UpperCamelCase, return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowercase : Optional[int] = hf_model(**_UpperCamelCase ) lowercase : str = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": __a = argparse.ArgumentParser() __a = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195'''] parser.add_argument( '''--model_name''', default='''sam_vit_h_4b8939''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) parser.add_argument( '''--model_hub_id''', default='''ybelkada/segment-anything''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) __a = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
337
import os import re import shutil import sys import tempfile import unittest import black __a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __a = ''' \""" Output class for the scheduler\'s step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \""" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None ''' class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCamelCase ( self ): lowercase : str = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) ) lowercase : Any = self.diffusers_dir shutil.copy( os.path.join(SCREAMING_SNAKE_CASE__ , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , ) def __lowerCamelCase ( self ): lowercase : List[Any] = '''src/diffusers''' shutil.rmtree(self.diffusers_dir ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ): lowercase : Tuple = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: lowercase : str = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result lowercase : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) lowercase : List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ ) lowercase : Dict = os.path.join(self.diffusers_dir , '''new_code.py''' ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , newline='''\n''' ) as f: f.write(SCREAMING_SNAKE_CASE__ ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , '''r''' ) as f: self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Tuple = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ) self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): # Base copy consistency self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , ) # With no empty line at the end self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , ) # Copy consistency with rename self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with a really long name lowercase : List[Any] = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason''' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , ) # Copy consistency with overwrite self.check_copy_consistency( '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('''DDPM''' , '''Test''' , SCREAMING_SNAKE_CASE__ ) , )
337
1
from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ , ): lowercase : Optional[Any] = parent lowercase : Any = 13 lowercase : List[str] = 7 lowercase : Union[str, Any] = True lowercase : int = True lowercase : int = True lowercase : int = 99 lowercase : Optional[Any] = 32 lowercase : List[Any] = 2 lowercase : Any = 4 lowercase : List[str] = 37 lowercase : Any = '''gelu''' lowercase : Optional[Any] = 0.1 lowercase : Optional[Any] = 0.1 lowercase : List[str] = 512 lowercase : List[str] = 16 lowercase : List[Any] = 2 lowercase : Tuple = 0.02 lowercase : List[Any] = 3 lowercase : str = 4 lowercase : Dict = None def __lowerCamelCase ( self ): lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Union[str, Any] = None if self.use_input_mask: lowercase : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Any = None lowercase : Tuple = None lowercase : List[Any] = None if self.use_labels: lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : int = ids_tensor([self.batch_size] , self.num_choices ) lowercase : Dict = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : str = self.prepare_config_and_inputs() lowercase : List[str] = True lowercase : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = TFEsmModel(config=SCREAMING_SNAKE_CASE__ ) lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = [input_ids, input_mask] lowercase : List[str] = model(SCREAMING_SNAKE_CASE__ ) lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ): lowercase : List[str] = True lowercase : List[Any] = TFEsmModel(config=SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''encoder_hidden_states''': encoder_hidden_states, '''encoder_attention_mask''': encoder_attention_mask, } lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ) lowercase : List[str] = [input_ids, input_mask] lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ ) # Also check the case where encoder outputs are not passed lowercase : Optional[int] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[Any] = TFEsmForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) lowercase : int = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : Optional[int] = self.num_labels lowercase : List[str] = TFEsmForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self ): lowercase : Optional[Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : List[str] = config_and_inputs lowercase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): A : Any = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) A : List[str] = ( { 'feature-extraction': TFEsmModel, 'fill-mask': TFEsmForMaskedLM, 'text-classification': TFEsmForSequenceClassification, 'token-classification': TFEsmForTokenClassification, 'zero-shot': TFEsmForSequenceClassification, } if is_tf_available() else {} ) A : Optional[Any] = False A : Optional[Any] = False def __lowerCamelCase ( self ): lowercase : Optional[Any] = TFEsmModelTester(self ) lowercase : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE__ ) @slow def __lowerCamelCase ( self ): for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : str = TFEsmModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): pass @unittest.skip('''Protein models do not support embedding resizing.''' ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Tuple = model_class(SCREAMING_SNAKE_CASE__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer lowercase : Optional[Any] = model.get_bias() assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for k, v in name.items(): assert isinstance(SCREAMING_SNAKE_CASE__ , tf.Variable ) else: lowercase : Optional[Any] = model.get_output_embeddings() assert x is None lowercase : Tuple = model.get_bias() assert name is None @require_tf class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCamelCase ( self ): lowercase : Optional[Any] = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowercase : str = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowercase : int = model(SCREAMING_SNAKE_CASE__ )[0] lowercase : int = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , SCREAMING_SNAKE_CASE__ ) # compare the actual values for a slice. lowercase : Optional[Any] = tf.constant( [ [ [8.921518, -10.589814, -6.4671307], [-6.3967156, -13.911377, -1.1211915], [-7.781247, -13.951557, -3.740592], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def __lowerCamelCase ( self ): lowercase : Dict = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) lowercase : Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase : List[Any] = model(SCREAMING_SNAKE_CASE__ )[0] # compare the actual values for a slice. lowercase : int = tf.constant( [ [ [0.14443092, 0.54125327, 0.3247739], [0.30340484, 0.00526676, 0.31077722], [0.32278043, -0.24987096, 0.3414628], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
337
import math class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1 lowercase : List[Any] = n lowercase : List[Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # adjacency matrix for weight lowercase : Union[str, Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # dp[i][j] stores minimum distance from i to j def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = w def __lowerCamelCase ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
337
1
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->Any: """simple docstring""" if b == 0: return 1 if (b % 2) == 0: return actual_power(_UpperCamelCase, int(b / 2 ) ) * actual_power(_UpperCamelCase, int(b / 2 ) ) else: return a * actual_power(_UpperCamelCase, int(b / 2 ) ) * actual_power(_UpperCamelCase, int(b / 2 ) ) def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->float: """simple docstring""" if b < 0: return 1 / actual_power(_UpperCamelCase, _UpperCamelCase ) return actual_power(_UpperCamelCase, _UpperCamelCase ) if __name__ == "__main__": print(power(-2, -3))
337
from __future__ import annotations def __lowercase ( _UpperCamelCase ) ->float: """simple docstring""" if not nums: raise ValueError('''List is empty''' ) return sum(_UpperCamelCase ) / len(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
337
1
def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->int: """simple docstring""" assert x is not None assert y is not None lowercase : str = len(_UpperCamelCase ) lowercase : Optional[Any] = len(_UpperCamelCase ) # declaring the array for storing the dp values lowercase : Any = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741 for i in range(1, m + 1 ): for j in range(1, n + 1 ): lowercase : str = 1 if x[i - 1] == y[j - 1] else 0 lowercase : Optional[Any] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match ) lowercase : Optional[Any] = '''''' lowercase , lowercase : str = m, n while i > 0 and j > 0: lowercase : List[str] = 1 if x[i - 1] == y[j - 1] else 0 if l[i][j] == l[i - 1][j - 1] + match: if match == 1: lowercase : Optional[Any] = x[i - 1] + seq i -= 1 j -= 1 elif l[i][j] == l[i - 1][j]: i -= 1 else: j -= 1 return l[m][n], seq if __name__ == "__main__": __a = '''AGGTAB''' __a = '''GXTXAYB''' __a = 4 __a = '''GTAB''' __a , __a = longest_common_subsequence(a, b) print('''len =''', ln, ''', sub-sequence =''', subseq) import doctest doctest.testmod()
337
import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __a = logging.get_logger(__name__) class __SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ): warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
337
1
import math class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__=0 ): # a graph with Node 0,1,...,N-1 lowercase : List[Any] = n lowercase : List[Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # adjacency matrix for weight lowercase : Union[str, Any] = [ [math.inf for j in range(0 , SCREAMING_SNAKE_CASE__ )] for i in range(0 , SCREAMING_SNAKE_CASE__ ) ] # dp[i][j] stores minimum distance from i to j def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): lowercase : int = w def __lowerCamelCase ( self ): for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase : Any = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): return self.dp[u][v] if __name__ == "__main__": __a = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
337
from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging __a = logging.get_logger(__name__) def __lowercase ( _UpperCamelCase ) ->List[int]: """simple docstring""" if isinstance(_UpperCamelCase, np.ndarray ): return list(tensor.shape ) lowercase : Optional[Any] = tf.shape(_UpperCamelCase ) if tensor.shape == tf.TensorShape(_UpperCamelCase ): return dynamic lowercase : Tuple = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(_UpperCamelCase )] def __lowercase ( _UpperCamelCase, _UpperCamelCase = None, _UpperCamelCase = None ) ->tf.Tensor: """simple docstring""" return tf.nn.softmax(logits=logits + 1e-9, axis=_UpperCamelCase, name=_UpperCamelCase ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, _UpperCamelCase=1e-5, _UpperCamelCase=-1 ) ->int: """simple docstring""" if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(_UpperCamelCase, _UpperCamelCase ): raise NotImplementedError('''Only 1D weight and bias tensors are supported for now, with only a single axis.''' ) # Get mean and variance on the axis to be normalized lowercase , lowercase : Union[str, Any] = tf.nn.moments(_UpperCamelCase, axes=[axis], keepdims=_UpperCamelCase ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis lowercase : int = [1] * inputs.shape.rank lowercase : Union[str, Any] = shape_list(_UpperCamelCase )[axis] lowercase : List[str] = tf.reshape(_UpperCamelCase, _UpperCamelCase ) lowercase : Dict = tf.reshape(_UpperCamelCase, _UpperCamelCase ) # Compute layer normalization using the batch_normalization # function. lowercase : List[str] = tf.nn.batch_normalization( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase, offset=_UpperCamelCase, scale=_UpperCamelCase, variance_epsilon=_UpperCamelCase, ) return outputs def __lowercase ( _UpperCamelCase, _UpperCamelCase=0, _UpperCamelCase=-1 ) ->List[Any]: """simple docstring""" if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input lowercase : Dict = tf.shape(_UpperCamelCase ) lowercase : Optional[Any] = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) lowercase : List[str] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0 ) return tf.reshape(_UpperCamelCase, _UpperCamelCase ) def __lowercase ( _UpperCamelCase ) ->tf.Tensor: """simple docstring""" if not isinstance(_UpperCamelCase, tf.Tensor ): lowercase : Optional[Any] = tf.convert_to_tensor(_UpperCamelCase ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: lowercase : Tuple = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: lowercase : List[Any] = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) lowercase : str = ( tf.cast(1, encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase = "input_ids" ) ->None: """simple docstring""" tf.debugging.assert_less( _UpperCamelCase, tf.cast(_UpperCamelCase, dtype=tensor.dtype ), message=( f"""The maximum value of {tensor_name} ({tf.math.reduce_max(_UpperCamelCase )}) must be smaller than the embedding """ f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time.""" ), ) def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]: """simple docstring""" lowercase : List[Any] = 64512 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. lowercase : Optional[int] = [x for x in data if len(_UpperCamelCase ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( '''The following attributes cannot be saved to HDF5 file because ''' f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """ f"""bytes: {bad_attributes}""" ) lowercase : Any = np.asarray(_UpperCamelCase ) lowercase : List[Any] = 1 lowercase : Tuple = np.array_split(_UpperCamelCase, _UpperCamelCase ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 lowercase : Dict = np.array_split(_UpperCamelCase, _UpperCamelCase ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(_UpperCamelCase ): lowercase : Optional[int] = chunk_data else: lowercase : int = data def __lowercase ( _UpperCamelCase, _UpperCamelCase ) ->List[str]: """simple docstring""" if name in group.attrs: lowercase : str = [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs[name]] else: lowercase : Optional[Any] = [] lowercase : List[str] = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('''utf8''' ) if hasattr(_UpperCamelCase, '''decode''' ) else n for n in group.attrs['''%s%d''' % (name, chunk_id)]] ) chunk_id += 1 return data def __lowercase ( _UpperCamelCase ) ->List[str]: """simple docstring""" def _expand_single_ad_tensor(_UpperCamelCase ): if isinstance(_UpperCamelCase, tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(_UpperCamelCase, axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor, _UpperCamelCase )
337
1
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class __SCREAMING_SNAKE_CASE : def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , ): lowercase : Optional[Any] = parent lowercase : Optional[int] = batch_size lowercase : List[str] = seq_length lowercase : Dict = is_training lowercase : str = use_token_type_ids lowercase : str = use_labels lowercase : Any = vocab_size lowercase : int = hidden_size lowercase : Tuple = num_hidden_layers lowercase : str = num_attention_heads lowercase : List[Any] = intermediate_size lowercase : Tuple = hidden_act lowercase : List[Any] = hidden_dropout_prob lowercase : Optional[int] = attention_probs_dropout_prob lowercase : Any = max_position_embeddings lowercase : Dict = type_vocab_size lowercase : Union[str, Any] = type_sequence_label_size lowercase : Optional[Any] = initializer_range lowercase : Any = num_labels lowercase : Optional[Any] = num_choices lowercase : List[Any] = scope lowercase : List[Any] = self.vocab_size - 1 def __lowerCamelCase ( self ): lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Optional[Any] = None if self.use_token_type_ids: lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Union[str, Any] = None lowercase : Dict = None lowercase : List[Any] = None if self.use_labels: lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowercase : Optional[Any] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) lowercase : Optional[int] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = OpenAIGPTModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ ) lowercase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ ) lowercase : Any = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ): lowercase : Tuple = OpenAIGPTLMHeadModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ): lowercase : int = OpenAIGPTDoubleHeadsModel(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Any = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ ): lowercase : List[Any] = self.num_labels lowercase : Tuple = OpenAIGPTForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase : Dict = model(SCREAMING_SNAKE_CASE__ , token_type_ids=SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ( lowercase ) , ) : Tuple = config_and_inputs lowercase : Tuple = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class __SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ): A : Union[str, Any] = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) A : Dict = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly A : List[str] = ( { 'feature-extraction': OpenAIGPTModel, 'text-classification': OpenAIGPTForSequenceClassification, 'text-generation': OpenAIGPTLMHeadModel, 'zero-shot': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. return True return False def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ): lowercase : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": lowercase : str = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) lowercase : Dict = inputs_dict['''labels'''] lowercase : List[Any] = inputs_dict['''labels'''] lowercase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ , ) lowercase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def __lowerCamelCase ( self ): lowercase : Tuple = OpenAIGPTModelTester(self ) lowercase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE__ , n_embd=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*SCREAMING_SNAKE_CASE__ ) def __lowerCamelCase ( self ): lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) @slow def __lowerCamelCase ( self ): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : Dict = OpenAIGPTModel.from_pretrained(SCREAMING_SNAKE_CASE__ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE__ ) @require_torch class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def __lowerCamelCase ( self ): lowercase : Dict = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(SCREAMING_SNAKE_CASE__ ) lowercase : Dict = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE__ ) # the president is lowercase : Dict = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the lowercase : Optional[int] = model.generate(SCREAMING_SNAKE_CASE__ , do_sample=SCREAMING_SNAKE_CASE__ ) self.assertListEqual(output_ids[0].tolist() , SCREAMING_SNAKE_CASE__ )
337
def __lowercase ( _UpperCamelCase = 4000000 ) ->int: """simple docstring""" lowercase : int = [] lowercase , lowercase : str = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(_UpperCamelCase ) lowercase , lowercase : Dict = b, a + b return sum(_UpperCamelCase ) if __name__ == "__main__": print(F'''{solution() = }''')
337
1